#define RLIMIT_STACK_VAL 0x800000 /* 8 MB */
/*
 * memory map for processes
 * 0xFFFF FFFF F800 0000 and up: kernel; can copy kernel pml4 entries
 * program load at 4 MB
 * kernel stack at 128 TB = 8000 0000 0000
 * stack at 127 TB = 7F00 0000 0000 (and down); thread stacks go somewhere
 * stacks could be limited to physmem. perhaps it's ok for each address space
 * to have its own total stacks limit
 * mmaps at 126 TB = 7E00 0000 0000 (and down)
 * heap at 96 TB = 6000 0000 0000 (and up)
 *
 * heap after program load...
 * program at 4 MB = 40 0000
 */
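/* The layout above written out as constants. A sketch only: these PROC_*
 * names are illustrative and not used elsewhere in this file; the
 * TB()/GB()/MB() helpers are the ones the code below already uses.
 */
#if 0
#define PROC_LOAD_BASE  MB(4)   /* program text loads here */
#define PROC_HEAP_BASE  TB(96)  /* heap grows up from here */
#define PROC_MMAP_TOP   TB(126) /* mmaps grow down from here */
#define PROC_STACK_TOP  TB(127) /* user stack grows down from here */
#define PROC_KSTACK_TOP TB(128) /* per-task kernel stack sits below here */
#endif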
/* so, to create a process:
 * allocate 2 MB for the stack,
 * allocate whatever is needed for the code,
 * load the code at the code base,
 * set up the registers,
 * set up the stack for the iretq or sysret,
 * iretq or sysret to start the task
 */
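/* A sketch of the "set up the stack for the iretq" step, assuming the
 * standard x86-64 interrupt-return frame (SS, RSP, RFLAGS, CS, RIP, with
 * RIP at the lowest address). build_iretq_frame is a hypothetical helper,
 * not something the code below calls.
 */
#if 0
static uint64_t *build_iretq_frame(uint64_t *sp, uint64_t rip, uint64_t cs,
		uint64_t rflags, uint64_t rsp, uint64_t ss) {
	*--sp = ss;     /* iretq pops this last */
	*--sp = rsp;
	*--sp = rflags;
	*--sp = cs;
	*--sp = rip;    /* iretq pops this first */
	return sp;      /* point the kernel rsp here, then iretq */
}
#endif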
/* current process is per processor? */
struct process *current_task = 0;
static struct process *available = 0; /* a stack of unused task structs */
static struct process mainTask;
static struct process *idle_task;
static void idle_task_main() {
	/* ... loop, halting until the next interrupt ... */
}

struct pqueue {
	struct spinlock_t lock;
	struct process *head; /* head->prev points at the tail; tail->next is 0 */
};

static struct pqueue sleepqueue;
static struct pqueue runqueue;
static struct pqueue terminating;
void initqueue(struct pqueue *q) {
	q->lock = (struct spinlock_t){0};
	q->head = 0;
}
void dumpqueue(struct pqueue *q, char *name) {
	struct process *task;

	if (name) {
		printk("%llu %s:", timer_ticks, name);
	} else {
		printk("%llu queue %llx:", timer_ticks, q);
	}
	for (task = q->head; task; task = task->next) {
		printk(" %u", task->pid);
		if (task->sleep) {
			printk(":%llu", task->sleep);
		}
	}
	printk("\n");
}
/* dequeuing a sleeper is no different than a regular dequeue,
 * but enqueuing keeps the queue sorted by wake-up time */
void enqueue_sleeper(struct pqueue *q, struct process *task) {
	struct process *sleeper;

	spinlock_acquire(&q->lock);
	if (!q->head) {
		task->next = 0;
		task->prev = task;
		q->head = task;
		spinlock_release(&q->lock);
		return;
	}
	for (sleeper = q->head; sleeper; sleeper = sleeper->next) {
		if (task->sleep < sleeper->sleep) {
			/* insert before this one */
			task->next = sleeper;
			task->prev = sleeper->prev;
			if (sleeper != q->head) {
				sleeper->prev->next = task;
			} else {
				q->head = task;
			}
			sleeper->prev = task;
			spinlock_release(&q->lock);
			return;
		}
	}
	/* if we got here, we're the last task */
	task->next = 0;
	task->prev = q->head->prev;
	task->prev->next = task;
	q->head->prev = task;
	spinlock_release(&q->lock);
}
void enqueue(struct pqueue *q, struct process *task) {
	spinlock_acquire(&q->lock);
	task->next = 0;
	if (!q->head) {
		task->prev = task;
		q->head = task;
	} else {
		task->prev = q->head->prev;
		task->prev->next = task;
		q->head->prev = task;
	}
	spinlock_release(&q->lock);
}
struct process *dequeue(struct pqueue *q) {
	struct process *task;

	spinlock_acquire(&q->lock);
	task = q->head;
	if (task) {
		q->head = task->next;
		if (q->head) {
			q->head->prev = task->prev;
		}
		task->next = task->prev = 0;
	}
	spinlock_release(&q->lock);
	return task;
}
void enqueue_runnable(struct process *task) {
	enqueue(&runqueue, task);
}

struct process *dequeue_runnable(void) {
	return dequeue(&runqueue);
}

pid_t getpid(void) {
	return current_task->pid;
}
void exit(int status) {
	current_task->status = status;

	/* put in the parent's "exited" queue for wait() */
}
void terminate(struct process *p) {
	/* remove it from whatever queue it's in */
	/* enqueue it into the terminating queue */
	enqueue(&terminating, p);
	/* free all memory, except maybe the kernel stack */
	/* remove from task queues */
	/* add to available (see the sketch below) */
}
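/* A sketch of the "add to available" step above: `available` (declared
 * near the top of this file) is a LIFO of retired task structs linked
 * through ->next. The two helper names here are hypothetical, and this
 * assumes the list is only touched under a lock or with interrupts off.
 */
#if 0
static void release_task_struct(struct process *p) {
	p->next = available; /* push onto the free stack */
	available = p;
}

static struct process *grab_task_struct(void) {
	struct process *p = available; /* pop, or 0 if empty */
	if (p)
		available = p->next;
	return p;
}
#endif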
void sleep(uint32_t ticks) {
	if (current_task->sleep) {
		panic("tried to sleep pid %u which was already sleeping until %llu\n",
				current_task->pid, current_task->sleep);
	}
	current_task->sleep = timer_ticks + ticks;
	enqueue_sleeper(&sleepqueue, current_task);
	current_task->flags &= ~TM_RUNNABLE;
	schedule();
}
static void third_main() {
	for (;;) {
		printk("%llu: Hello from A pid %lu, cpl = %x\n", timer_ticks, getpid(), current_task->pl);
		sleep(300 + 300 * getpid());
	}
}
static void other_main() {
	for (;;) {
		printk("%llu: Hello from B pid %lu\n", timer_ticks, getpid());
		/* ... */
	}
}
static void hog_main() {
	static volatile uint64_t donetil = 0;

	printk("first scheduled hog\n");
	for (;;) {
		if (timer_ticks >= donetil) {
			printk("%llu hog %u running\n", timer_ticks, current_task->pid);
			/* ... push donetil forward so the next report comes later ... */
		}
	}
}
struct interrupt_handler pih;

static void preemption_interrupt(struct interrupt_context *c, void *n) {
	struct process *sleeper;

	/* check for sleepers */
	while (sleepqueue.head && sleepqueue.head->sleep <= timer_ticks) {
		sleeper = dequeue(&sleepqueue);
		sleeper->sleep = 0;
		sleeper->flags |= TM_RUNNABLE;
		enqueue_runnable(sleeper);
	}
	if (current_task != idle_task) {
		current_task->quantum--;
	}
	if (current_task->quantum == 0) {
		if (current_task != idle_task) {
			if (current_task->flags & TM_RUNNABLE) {
				enqueue_runnable(current_task);
			}
		}
		/* out of quantum: ask for a reschedule on the way out */
		current_task->flags |= TM_SCHEDULE;
	}
}
void init_tasking() {
	struct process *task;

	initqueue(&runqueue);
	initqueue(&sleepqueue);
	initqueue(&terminating);

	mainTask.reg.cr3 = getcr3();
	mainTask.reg.rflags = getrflags();
	mainTask.reg.cs = 0x10;
	mainTask.reg.ss = 0x18;

	idle_task = new_task(idle_task_main, mainTask.reg.rflags, MEM_KERNEL, TASK_KERNEL|TASK_NOSCHED);

	/* TODO move these into a dummy/test function */
	task = new_task(other_main, mainTask.reg.rflags, MEM_KERNEL, TASK_KERNEL);
	task = new_task(third_main, mainTask.reg.rflags, MEM_KERNEL, TASK_KERNEL);
	task = new_task(third_main, mainTask.reg.rflags, MEM_KERNEL, TASK_KERNEL);
	task = new_task(third_main, mainTask.reg.rflags, MEM_KERNEL, TASK_KERNEL);

	task = new_task(hog_main, mainTask.reg.rflags, mainTask.reg.cr3, TASK_KERNEL);

	task = new_task(usermain, mainTask.reg.rflags, create_addrspace(), 0);
	current_task = &mainTask;
	pih.handler = preemption_interrupt;
	interrupt_add_handler(IRQ0, &pih);
	printk("set up tasking\n");
}
struct process *new_task(void (*main)(), uint64_t rflags, uint64_t pagedir, uint64_t flags) {
	struct process *p;

	p = koalloc(sizeof *p);
	if (!p) {
		panic("can't allocate memory for new task\n");
	}
	create_task(p, main, rflags, pagedir, flags);
	return p;
}
void setup_usertask(void);

static pid_t next_pid = 2;
pid_t create_task(struct process *task, void (*main)(), uint64_t rflags, uint64_t addrspace, uint64_t flags) {
	task->reg = (struct registers){ 0 }; /* clear out all registers for the new task */

	/* roll over? re-use? */
	/* could possibly use bts on a pid bitmap (see the sketch after this
	 * function). possibly wasteful, and still O(n) */
	task->pid = next_pid++;

	task->reg.rflags = rflags;
	task->reg.cr3 = addrspace;
	task->kstack = (uint64_t)PHY2VIRTP(palloc()); /* new kernel stack */
	task->reg.rsp = task->kstack + 0x1000;
	task->reg.rip = (uint64_t)main;
	task->reg.cs = 0x10; /* kernel code segment */
	task->reg.ss = 0x18; /* kernel data segment */
	task->pl = 0; /* not a user task */
	task->quantum = 0; /* will get a quantum when it's scheduled */
	task->flags = TM_RUNNABLE; /* new tasks are runnable */

	if (! (flags & TASK_KERNEL)) {
		task->main = (int (*)(int,char**))main;
		task->reg.rdi = (uintptr_t)task->main;
		task->reg.rip = (uintptr_t)setup_usertask;
		task->flags |= TM_USER;
	}
	/* stacks will need thinking */
	/* the stack can't just go at a fixed 128 TB; threads need their own */
	if (! (flags & TASK_KERNEL)) {
		printk("created user task %u %llx, space = %llx, entry = %llx\n",
				task->pid, task, task->reg.cr3, task->reg.rip);
	}
	if (!(flags & TASK_NOSCHED)) {
		enqueue_runnable(task);
	}
	return task->pid;
}
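/* The bitmap idea from the comment in create_task, sketched out. PID_MAX,
 * pid_map, and alloc_pid are all hypothetical names, not used elsewhere:
 * scan for a clear bit and set it (what lock bts does in one step), so
 * freed pids can be reused instead of rolling next_pid over.
 */
#if 0
#define PID_MAX 4096
static uint64_t pid_map[PID_MAX / 64];

static pid_t alloc_pid(void) {
	for (unsigned i = 0; i < PID_MAX / 64; i++) {
		if (pid_map[i] != ~0ULL) {
			unsigned bit = __builtin_ctzll(~pid_map[i]); /* first clear bit */
			pid_map[i] |= 1ULL << bit;
			return (pid_t)(i * 64 + bit);
		}
	}
	return -1; /* out of pids */
}
#endif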
void setup_usertask(void) {
	struct process *task;

	task = current_task;

	/* need a sysret trampoline for user tasks (see the sketch after this
	 * function) */
	//task->reg.rip = (uint64_t)usermodetrampoline;
	//task->reg.rcx = (uint64_t)main;
	// hmm. if this is in kernel space, it's going to fail after the sysret
	// we need to copy the code into user space, and hope it's PIC...

	/* create a 1 MB map for the user code at the 8 GB mark */
	printk("setting up usertask %u, space = %llx\n", task->pid, getcr3());
	vmapz(task->reg.cr3, GB(8), MB(1), MEM_USERSPACE|MEM_RW);
	printk("mapped 1 MB for user\n");
	/* copy the user main function to the 8 GB mark */
	memmove((void *)GB(8), task->main, KB(4));
	printk("copied main func from %llx to %llx\n", task->main, GB(8));
	/* hmm, not mapped here..., so we have to map it in the trampoline
	 * so it has the user mappings */
	/* map it into kernel space? will need a mutex on that */
	/* then just copy the tables? */

	task->reg.rip = GB(8);
	/* create the user mode stack */
	task->stacks = TB(127);
	vmapz(task->reg.cr3, task->stacks - MB(2), MB(2), MEM_USERSPACE);
	task->usp = task->stacks; /* user stack pointer */

	/* What if this fails? */
	/* need a kernel mode stack for syscalls, and possibly interrupts */
	task->kstack = TB(128);
	printk("creating 4 KB kernel stack at virtual address %llx\n", task->kstack - KB(4));
	vmapz(task->reg.cr3, task->kstack - KB(4), KB(4), MEM_RW|MAP_TRACE);
	task->reg.rsp = task->kstack;

	printk("user task %u, kstack rsp = %llx\n", task->pid, task->kstack);
	//print_decode(0, task->reg.cr3, task->reg.rsp-8);
	test_address((void *)(task->reg.rsp - 8));

	/* this won't return */
	printk("user tramp\n");
	/* we loaded to the 8 GB mark... */
	usermodetrampoline((void (*)())GB(8), task->reg.rflags);
}
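/* The conventional shape of the sysret trampoline the comments above want;
 * a sketch only, not this kernel's usermodetrampoline. sysretq loads rip
 * from rcx and rflags from r11, and takes the user cs/ss pair from the
 * STAR MSR, so all that's left is switching to the user stack. Assumes
 * interrupts are already masked.
 */
#if 0
static void sysret_sketch(uint64_t user_rip, uint64_t user_rflags, uint64_t user_rsp) {
	register uint64_t rcx asm("rcx") = user_rip;    /* sysretq: rip <- rcx */
	register uint64_t r11 asm("r11") = user_rflags; /* sysretq: rflags <- r11 */
	asm volatile("movq %0, %%rsp\n\tsysretq"
			: : "r"(user_rsp), "r"(rcx), "r"(r11) : "memory");
}
#endif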
void schedule(void) {
	current_task->flags |= TM_SCHEDULE;
}

/* set the current task for scheduling, and if we're not in an interrupt,
 * immediately re-schedule. could just halt for now, which will wait for
 * the next timer interrupt */
/* TODO check if in interrupt. if not, then call one? (see the sketch below) */
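/* One way to do that TODO, assuming IRQ0 was remapped to vector 0x20 when
 * the PIC was set up (an assumption; use whatever vector IRQ0 really has):
 * software-trigger the preemption vector so do_schedule runs on the normal
 * interrupt-return path. reschedule_now is a hypothetical name.
 */
#if 0
static void reschedule_now(void) {
	asm volatile("int $0x20"); /* same vector the timer uses */
}
#endif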
int need_schedule(void) {
	return current_task ? current_task->flags & TM_SCHEDULE : 0;
}
/* probably want to use a lock-free queue with compare and swap
 * see http://preshing.com/20120612/an-introduction-to-lock-free-programming/
 */
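/* A minimal sketch of the compare-and-swap idea from the note above, using
 * the GCC/Clang __atomic builtins over the existing ->next links. This is
 * a lock-free LIFO push (a Treiber stack), not the FIFO the pqueue code
 * needs, so it's illustration only; lf_top and lf_push are hypothetical.
 */
#if 0
static struct process *lf_top;

static void lf_push(struct process *task) {
	struct process *old;

	do {
		old = __atomic_load_n(&lf_top, __ATOMIC_RELAXED);
		task->next = old;
	} while (!__atomic_compare_exchange_n(&lf_top, &old, task, 0,
			__ATOMIC_RELEASE, __ATOMIC_RELAXED));
}
#endif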
void do_schedule(void) {
	struct process *prev, *next;

	next = dequeue_runnable();
	if (!next) {
		/* no runnable task */
		next = idle_task;
	}
	next->flags &= ~TM_SCHEDULE; /* clear the schedule flag */
	next->quantum = 10; /* smaller slice for the same task */

	/* don't go through the trouble if we're not changing tasks */
	if (next == current_task) {
		return;
	}
	dumpqueue(&runqueue, "runqueue");
	prev = current_task;
	current_task = next;
	switch_task(&prev->reg, &next->reg);
}