/*
 * Copyright (c) 2021 joshua stein
 *
 * Permission to use, copy, modify, and distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

/*
 * NOTE(review): the four system header names were lost during text
 * extraction (the "<...>" parts were stripped, leaving bare #include
 * lines).  Restored from what this file demonstrably uses — memset/
 * memcmp (<string.h>), jmp_buf/setjmp/longjmp (<setjmp.h>), and the
 * Mac Memory Manager (Ptr, SetApplLimit, MemError, ApplZone) — but
 * the exact original set/order should be confirmed against upstream.
 */
#include <stdio.h>
#include <string.h>
#include <setjmp.h>
#include <Memory.h>

#include "uthread.h"
#include "util.h"

#define COORDINATOR_STACK_SIZE	(1024UL * 100UL)
#define STACK_SIZE		(1024UL * 50UL)
#define STACK_GAP		1024
#define STACK_GAP_FILL		0xff
#define CANARY			0xdeadf00d

jmp_buf uthread_coord_env;
static struct uthread uthreads[NUM_UTHREADS];
struct uthread *uthread_current = NULL;
static Ptr main_stack_gap = NULL;
static char uthread_stack_gap_fill[STACK_GAP];
unsigned long uthread_ApplLimit = 0;

void uthread_begin(struct uthread *uthread);

/*
 * | A5 world          |
 * |-------------------| 0x26234E CurStackBase (stacks_top)
 * | main stack        | COORDINATOR_STACK_SIZE
 * |-------------------| 0x260F4E
 * | main stack gap    | STACK_GAP
 * |-------------------| 0x260C00 (main_stack_gap, upward)
 * : (other threads)   :
 * |-------------------| 0x25DC00 ((STACK_SIZE - STACK_GAP) downward)
 * | uthread 1 stack   |
 * |                   |
 * |-------------------|
 * | uthread 1 gap     | 0x25D001 (STACK_GAP upward)
 * |-------------------| 0x25D000 (downward)
 * | uthread 0 stack   |
 * |                   |
 * |-------------------|
 * | uthread 0 gap     | 0x25C401 (STACK_GAP upward)
 * |-------------------| 0x25C400 ApplLimit (stacks_bottom)
 * |                   |
 * | heap              |
 * |                   |
 * |-------------------| 0x16F5D8 ApplZone
 */
void
/*
 * Carve per-thread stacks out of the space between the main stack and
 * the application heap (see the layout diagram above), then mark every
 * thread slot dead.  Must run once, from the main (coordinator) stack,
 * before uthread_add()/uthread_coordinate() are used.
 */
uthread_init(void)
{
	unsigned long stacks_top, stacks_bottom;
	short i;

	/* top of the main stack, per the low-memory global */
	stacks_top = (unsigned long)CurStackBase;
	main_stack_gap = (Ptr)(stacks_top - COORDINATOR_STACK_SIZE -
	    STACK_GAP);

	/* align uthread stacks */
	main_stack_gap = (Ptr)((unsigned long)main_stack_gap -
	    ((unsigned long)main_stack_gap % 1024));

	stacks_bottom = (unsigned long)main_stack_gap -
	    (STACK_SIZE * NUM_UTHREADS);
	if (stacks_bottom > stacks_top)
		panic("stacks_bottom > stacks_top");

	/* leave 64k of slack below the lowest thread stack */
	uthread_ApplLimit = stacks_bottom - (1024UL * 64);
	if (uthread_ApplLimit < (unsigned long)ApplZone)
		panic("stacks_bottom < ApplZone");

	/* shrink the heap so the Memory Manager can't grow into our stacks */
	SetApplLimit((Ptr)uthread_ApplLimit);
	if (MemError())
		panic("Failed to SetApplLimit to %lu", uthread_ApplLimit);

	/* color in our whole uthread stack space */
	memset((Ptr)uthread_ApplLimit, STACK_GAP_FILL,
	    (unsigned long)main_stack_gap + STACK_GAP - uthread_ApplLimit);

	/* this is what we'll compare each gap to */
	memset(uthread_stack_gap_fill, STACK_GAP_FILL, STACK_GAP);

	for (i = 0; i < NUM_UTHREADS; i++) {
		uthreads[i].id = i;
		uthreads[i].state = UTHREAD_STATE_DEAD;
		/*
		 * stack_loc is the highest (initial) SP for thread i; the
		 * stack grows downward toward stack_gap at the low end.
		 * The -2/+2 keeps the initial SP word-aligned inside the
		 * slot — presumably for 68k alignment; confirm upstream.
		 */
		uthreads[i].stack_loc = stacks_bottom +
		    (STACK_SIZE * (i + 1)) - 2;
		uthreads[i].stack_gap = uthreads[i].stack_loc -
		    STACK_SIZE + 2;
	}

	uthread_verify();
}

/*
 * Claim the first dead slot for a thread that will run
 * func(uthread, arg).  Returns the slot, or NULL when all NUM_UTHREADS
 * slots are busy.  The thread does not execute until
 * uthread_coordinate() next schedules it (state SETUP -> RUNNING).
 */
struct uthread *
uthread_add(void *func, void *arg)
{
	short i;

	for (i = 0; i < NUM_UTHREADS; i++) {
		if (uthreads[i].state != UTHREAD_STATE_DEAD)
			continue;

		uthreads[i].func = func;
		uthreads[i].arg = arg;
		uthreads[i].state = UTHREAD_STATE_SETUP;

		/* color in stack gap just to be sure */
		memset((Ptr)uthreads[i].stack_gap, STACK_GAP_FILL, STACK_GAP);

		return &uthreads[i];
	}

	return NULL;
}

/*
 * Cooperatively hand control back to the coordinator.  May only be
 * called from inside a thread (uthread_current set); panics otherwise.
 * Also polices stack health: verifies all gap fills, checks this
 * thread's SP hasn't grown past half its stack, and re-checks a local
 * canary after the thread is resumed.
 */
void
uthread_yield(void)
{
	volatile long magic = CANARY;
	register unsigned long _sp = 0;

	/* 68k: snapshot the stack pointer (A7) to measure stack usage */
	asm { move.l a7, _sp };

	if (uthread_current == NULL)
		panic("uthread_yield not from a thread!");

	uthread_verify();

	if ((uthread_current->stack_loc - _sp) > (STACK_SIZE / 2))
		panic("thread %d stack growing too large "
		    "[SP=0x%lx] [stack=0x%lx] [gap=0x%lx-0x%lx]",
		    uthread_current->id, _sp, uthread_current->stack_loc,
		    uthread_current->stack_gap,
		    uthread_current->stack_gap + STACK_GAP);

	/* preserve SLEEPING so the coordinator keeps honoring the deadline */
	if (uthread_current->state != UTHREAD_STATE_SLEEPING)
		uthread_current->state = UTHREAD_STATE_YIELDING;

	/* save our context; the coordinator longjmps back here later */
	if (setjmp(uthread_current->env) == 0)
		longjmp(uthread_coord_env, UTHREAD_SETJMP_YIELDED);
		/* will not return */

	/* resumed: the frame survived the context switch if magic is intact */
	if (magic != CANARY)
		panic("uthread stack canary dead!");
}

/*
 * Put the current thread to sleep for roughly millis milliseconds.
 * NOTE(review): the millis->ticks conversion truncates, so delays
 * under ~17ms round down to 0 ticks (no sleep) — presumably acceptable
 * at tick granularity; confirm callers don't rely on sub-tick delays.
 */
void
uthread_msleep(unsigned long millis)
{
	/* Ticks is in 1/60 of a second, but it's easier to think in millis */
	unsigned long ts = millis / ((double)1000 / (double)60);

	uthread_current->state = UTHREAD_STATE_SLEEPING;
	uthread_current->sleeping_until = Ticks + ts;
	uthread_yield();
}

/*
 * Cancel a sleeping thread's deadline so the coordinator will run it
 * on its next pass, then yield.  NOTE(review): this calls
 * uthread_yield(), which panics when uthread_current is NULL — so this
 * appears callable only from within another thread, not from the
 * coordinator; confirm against callers.
 */
void
uthread_wakeup(struct uthread *uthread)
{
	uthread->sleeping_until = 0;
	uthread_yield();
}

/*
 * First entry into a thread: point A7 at the thread's private stack,
 * run its function to completion, then mark the slot for reuse (SETUP
 * if the thread asked to REPEAT, DEAD otherwise) and jump back to the
 * coordinator.  Never returns.
 */
void
uthread_begin(struct uthread *uthread)
{
	register unsigned long stack_loc = uthread->stack_loc;

	/* 68k: switch A7 onto this thread's own stack */
	asm { move.l stack_loc, a7 };

	uthread->func(uthread, uthread->arg);

	uthread_verify();

	/* uthread variable is probably trashed at this point */
	if (uthread_current->state == UTHREAD_STATE_REPEAT)
		uthread_current->state = UTHREAD_STATE_SETUP;
	else
		uthread_current->state = UTHREAD_STATE_DEAD;

	longjmp(uthread_coord_env, UTHREAD_SETJMP_YIELDED);
}

/*
 * The scheduler: run one pass over all slots, giving each runnable
 * thread a turn.  Dead slots are skipped, sleeping threads are skipped
 * until their tick deadline, and each dispatched thread returns here
 * via longjmp(uthread_coord_env, UTHREAD_SETJMP_YIELDED).  Called
 * repeatedly from the program's main loop on the main stack.
 */
void
uthread_coordinate(void)
{
	short i;

	for (i = 0; i < NUM_UTHREADS; i++) {
		if (uthreads[i].state == UTHREAD_STATE_DEAD)
			continue;
		if (uthreads[i].state == UTHREAD_STATE_SLEEPING &&
		    Ticks < uthreads[i].sleeping_until)
			continue;

		switch (setjmp(uthread_coord_env)) {
		case UTHREAD_SETJMP_DEFAULT:
			/* direct return from setjmp: dispatch the thread */
			uthread_current = &uthreads[i];
			switch (uthread_current->state) {
			case UTHREAD_STATE_SETUP:
				uthread_current->state = UTHREAD_STATE_RUNNING;
				uthread_begin(uthread_current);
				break;
			case UTHREAD_STATE_YIELDING:
			case UTHREAD_STATE_SLEEPING:
				uthread_current->state = UTHREAD_STATE_RUNNING;
				longjmp(uthread_current->env,
				    UTHREAD_SETJMP_YIELDED);
				break;
			}
			break;
		case UTHREAD_SETJMP_YIELDED:
			/* the thread yielded or finished; try the next slot */
			break;
		default:
			panic("unknown uthread state");
		}
	}

	uthread_current = NULL;
	uthread_verify();
}

/* return type of uthread_verify(), whose body begins on the next line */
void
uthread_verify(void)
{
	/*
	 * Check that no stack has overflowed into the guard region below
	 * it: the coordinator's gap under the main stack, and the gap
	 * under each live thread's stack, must still hold the fill
	 * pattern laid down at init time.  Panics on any corruption.
	 */
	unsigned char *fence;
	short n;

	fence = (unsigned char *)main_stack_gap;
	if (memcmp(fence, uthread_stack_gap_fill, STACK_GAP))
		panic("coordinator spilled into stack gap");

	for (n = 0; n < NUM_UTHREADS; n++) {
		if (uthreads[n].state == UTHREAD_STATE_DEAD)
			continue;

		fence = (unsigned char *)(uthreads[n].stack_gap);
		if (memcmp(fence, uthread_stack_gap_fill, STACK_GAP))
			panic("thread %d spilled into stack gap", n);
	}
}