jcs / subtext / uthread.c
uthread: Add uthread_wakeup (latest amendment: 350 on 2023-03-02)

/*
 * Copyright (c) 2021 joshua stein <jcs@jcs.org>
 *
 * Permission to use, copy, modify, and distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#include <stdarg.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <setjmp.h>

#include "uthread.h"
#include "util.h"

#define COORDINATOR_STACK_SIZE (1024UL * 100UL)
#define STACK_SIZE (1024UL * 50UL)
#define STACK_GAP 1024
#define STACK_GAP_FILL 0xff
#define CANARY 0xdeadf00d

jmp_buf uthread_coord_env;
static struct uthread uthreads[NUM_UTHREADS];
struct uthread *uthread_current = NULL;
static Ptr main_stack_gap = NULL;
static char uthread_stack_gap_fill[STACK_GAP];
unsigned long uthread_ApplLimit = 0;

void uthread_begin(struct uthread *uthread);

/*
 * |     A5 world      |
 * |-------------------| 0x26234E CurStackBase (stacks_top)
 * |    main stack     | COORDINATOR_STACK_SIZE
 * |-------------------| 0x260F4E
 * |  main stack gap   | STACK_GAP
 * |-------------------| 0x260C00 (main_stack_gap, upward)
 * :  (other threads)  :
 * |-------------------| 0x25DC00 ((STACK_SIZE - STACK_GAP) downward)
 * |  uthread 1 stack  |
 * |                   |
 * |-------------------|
 * |   uthread 1 gap   | 0x25D001 (STACK_GAP upward)
 * |-------------------| 0x25D000 (downward)
 * |  uthread 0 stack  |
 * |                   |
 * |-------------------|
 * |   uthread 0 gap   | 0x25C401 (STACK_GAP upward)
 * |-------------------| 0x25C400 ApplLimit (stacks_bottom)
 * |                   |
 * |       heap        |
 * |                   |
 * |-------------------| 0x16F5D8 ApplZone
 */
void
uthread_init(void)
{
	unsigned long stacks_top, stacks_bottom;
	short i;

	stacks_top = (unsigned long)CurStackBase;
	main_stack_gap = (Ptr)(stacks_top - COORDINATOR_STACK_SIZE -
	    STACK_GAP);
	/* align uthread stacks */
	main_stack_gap = (Ptr)((unsigned long)main_stack_gap -
	    ((unsigned long)main_stack_gap % 1024));

	stacks_bottom = (unsigned long)main_stack_gap -
	    (STACK_SIZE * NUM_UTHREADS);

	if (stacks_bottom > stacks_top)
		panic("stacks_bottom > stacks_top");

	uthread_ApplLimit = stacks_bottom - (1024UL * 64);
	if (uthread_ApplLimit < (unsigned long)ApplZone)
		panic("uthread_ApplLimit < ApplZone");

	SetApplLimit((Ptr)uthread_ApplLimit);
	if (MemError())
		panic("Failed to SetApplLimit to %lu", uthread_ApplLimit);

	/* color in our whole uthread stack space */
	memset((Ptr)uthread_ApplLimit, STACK_GAP_FILL,
	    (unsigned long)main_stack_gap + STACK_GAP - uthread_ApplLimit);
	/* this is what we'll compare each gap to */
	memset(uthread_stack_gap_fill, STACK_GAP_FILL, STACK_GAP);

	for (i = 0; i < NUM_UTHREADS; i++) {
		uthreads[i].id = i;
		uthreads[i].state = UTHREAD_STATE_DEAD;
		uthreads[i].stack_loc = stacks_bottom + (STACK_SIZE * (i + 1)) - 2;
		uthreads[i].stack_gap = uthreads[i].stack_loc - STACK_SIZE + 2;
	}

	uthread_verify();
}
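
/*
 * Usage sketch (hypothetical call site, not prescribed by this file):
 * because uthread_init() lowers ApplLimit with SetApplLimit(), it should
 * run early in program startup, before the application heap has grown up
 * toward the new limit:
 *
 *	uthread_init();		first, reserve the uthread stack space
 *	... then Toolbox setup, uthread_add() calls, and the event loop
 */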

struct uthread *
uthread_add(void *func, void *arg)
{
	short i;

	for (i = 0; i < NUM_UTHREADS; i++) {
		if (uthreads[i].state != UTHREAD_STATE_DEAD)
			continue;

		uthreads[i].func = func;
		uthreads[i].arg = arg;
		uthreads[i].state = UTHREAD_STATE_SETUP;

		/* color in stack gap just to be sure */
		memset((Ptr)uthreads[i].stack_gap, STACK_GAP_FILL, STACK_GAP);

		return &uthreads[i];
	}

	return NULL;
}
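
/*
 * Usage sketch: a thread body receives the uthread and the opaque arg
 * passed to uthread_add() (see the func(uthread, arg) call in
 * uthread_begin() below), and uthread_add() returns NULL when all
 * NUM_UTHREADS slots are busy.  "poll_input", "session", and
 * "session_work" are hypothetical names:
 *
 *	void
 *	poll_input(struct uthread *uthread, void *arg)
 *	{
 *		struct session *session = (struct session *)arg;
 *
 *		for (;;) {
 *			session_work(session);
 *			uthread_yield();
 *		}
 *	}
 *
 *	if (uthread_add(poll_input, session) == NULL)
 *		panic("uthread_add failed");
 */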

void
uthread_yield(void)
{
	volatile long magic = CANARY;
	register unsigned long _sp = 0;

	/* capture the current stack pointer to check stack growth */
	asm {
		move.l a7, _sp
	};

	if (uthread_current == NULL)
		panic("uthread_yield not from a thread!");

	uthread_verify();

	if ((uthread_current->stack_loc - _sp) > (STACK_SIZE / 2))
		panic("thread %d stack growing too large "
		    "[SP=0x%lx] [stack=0x%lx] [gap=0x%lx-0x%lx]",
		    uthread_current->id, _sp, uthread_current->stack_loc,
		    uthread_current->stack_gap,
		    uthread_current->stack_gap + STACK_GAP);

	if (uthread_current->state != UTHREAD_STATE_SLEEPING)
		uthread_current->state = UTHREAD_STATE_YIELDING;

	if (setjmp(uthread_current->env) == 0)
		longjmp(uthread_coord_env, UTHREAD_SETJMP_YIELDED);
		/* will not return */

	if (magic != CANARY)
		panic("uthread stack canary dead!");
}
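
/*
 * For scale: STACK_SIZE is 50k, so the check above panics once a thread's
 * stack pointer has moved more than 25k below its stack_loc, well before
 * it could run through its 1k gap into the next thread's stack.
 */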

void
uthread_msleep(unsigned long millis)
{
	/* Ticks is in 1/60 of a second, but it's easier to think in millis */
	unsigned long ts = millis / ((double)1000 / (double)60);

	uthread_current->state = UTHREAD_STATE_SLEEPING;
	uthread_current->sleeping_until = Ticks + ts;
	uthread_yield();
}
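
/*
 * Worked example: uthread_msleep(1000) gives ts = 1000 / (1000.0 / 60)
 * = 60 ticks, so the thread stays SLEEPING until Ticks has advanced by
 * about one second, and uthread_coordinate() skips it until then.
 */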

void
uthread_wakeup(struct uthread *uthread)
{
	/* clear the deadline so the coordinator runs it on its next pass */
	uthread->sleeping_until = 0;
	uthread_yield();
}
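
/*
 * Usage sketch: waking a thread parked in a long uthread_msleep().  Since
 * uthread_wakeup() itself calls uthread_yield(), it must run from another
 * uthread, not from the coordinator.  "queue_push" is hypothetical and
 * "writer" is a hypothetical struct uthread * saved from uthread_add():
 *
 *	queue_push(q, msg);		hand the sleeper some work
 *	uthread_wakeup(writer);		clear its deadline, then yield so
 *					the coordinator can resume it
 */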

void
uthread_begin(struct uthread *uthread)
{
	register unsigned long stack_loc = uthread->stack_loc;

	/* switch onto this thread's own stack */
	asm {
		move.l stack_loc, a7
	};

	uthread->func(uthread, uthread->arg);

	uthread_verify();

	/* uthread variable is probably trashed at this point */
	if (uthread_current->state == UTHREAD_STATE_REPEAT)
		uthread_current->state = UTHREAD_STATE_SETUP;
	else
		uthread_current->state = UTHREAD_STATE_DEAD;

	longjmp(uthread_coord_env, UTHREAD_SETJMP_YIELDED);
}

void
uthread_coordinate(void)
{
	short i;

	for (i = 0; i < NUM_UTHREADS; i++) {
		if (uthreads[i].state == UTHREAD_STATE_DEAD)
			continue;
		if (uthreads[i].state == UTHREAD_STATE_SLEEPING &&
		    Ticks < uthreads[i].sleeping_until)
			continue;

		switch (setjmp(uthread_coord_env)) {
		case UTHREAD_SETJMP_DEFAULT:
			uthread_current = &uthreads[i];

			switch (uthread_current->state) {
			case UTHREAD_STATE_SETUP:
				uthread_current->state = UTHREAD_STATE_RUNNING;
				uthread_begin(uthread_current);
				break;
			case UTHREAD_STATE_YIELDING:
			case UTHREAD_STATE_SLEEPING:
				uthread_current->state = UTHREAD_STATE_RUNNING;
				longjmp(uthread_current->env, UTHREAD_SETJMP_YIELDED);
				break;
			}
			break;
		case UTHREAD_SETJMP_YIELDED:
			break;
		default:
			panic("unknown uthread state");
		}
	}

	uthread_current = NULL;
	uthread_verify();
}
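
/*
 * Usage sketch: uthread_coordinate() makes one pass over the thread table,
 * resuming each runnable thread until it yields, then returns to the
 * caller.  A hypothetical main event loop would call it once per
 * iteration ("handle_event" is an illustrative name):
 *
 *	for (;;) {
 *		handle_event();		hypothetical Toolbox event handling
 *		uthread_coordinate();
 *	}
 */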

void
uthread_verify(void)
{
	short i;
	unsigned char *gap;

	gap = (unsigned char *)main_stack_gap;
	if (memcmp(gap, uthread_stack_gap_fill, STACK_GAP) != 0)
		panic("coordinator spilled into stack gap");

	for (i = 0; i < NUM_UTHREADS; i++) {
		if (uthreads[i].state == UTHREAD_STATE_DEAD)
			continue;

		gap = (unsigned char *)(uthreads[i].stack_gap);
		if (memcmp(gap, uthread_stack_gap_fill, STACK_GAP) != 0)
			panic("thread %d spilled into stack gap", i);
	}
}