00001
00002
00003
00004
00005
00006
00007
00008
00009
00010
00011
00012
00013
00014 #include <agnix/agnix.h>
00015 #include <agnix/init.h>
00016 #include <agnix/list.h>
00017 #include <agnix/queues.h>
00018 #include <agnix/tasks.h>
00019 #include <agnix/math64.h>
00020 #include <agnix/console.h>
00021 #include <agnix/memory.h>
00022 #include <agnix/spinlock.h>
00023 #include <asm/irq.h>
00024
00025 struct list_head running_tasks_list;
00026 struct list_head tasks_list;
00027 spinlock_t running_list_lock;
00028 spinlock_t tasks_list_lock;
00029
00030 extern struct task_s *idle_task;
00031
/*
 * Placeholder for future queue-based scheduling support.
 * NOTE(review): currently a no-op stub -- confirm whether any caller
 * depends on it doing work.
 */
void sched_queue_task(void)
{
}
00035
00036 void sched_activate_task_lock(struct task_s *task)
00037 {
00038 if (!(task->t_state & TASK_STAT_RUNNING)) {
00039 task->t_state |= TASK_STAT_RUNNING;
00040 list_add(&(task->running_task_list), &running_tasks_list);
00041 }
00042 }
00043
00044 void sched_activate_task(struct task_s *task)
00045 {
00046 u32 flags;
00047
00048 spin_lock_irqsave(&running_list_lock, flags);
00049 sched_activate_task_lock(task);
00050 spin_unlock_irqrestore(&running_list_lock, flags);
00051 }
00052
00053 void sched_deactivate_task_lock(struct task_s *task)
00054 {
00055 if (task->t_state & TASK_STAT_RUNNING) {
00056 task->t_state &= ~TASK_STAT_RUNNING;
00057 }
00058 }
00059
00060 void sched_deactivate_task(struct task_s *task)
00061 {
00062 u32 flags;
00063
00064 spin_lock_irqsave(&running_list_lock, flags);
00065 sched_deactivate_task_lock(task);
00066 spin_unlock_irqrestore(&running_list_lock, flags);
00067 }
00068
00069 void sched_add_task_lock(struct task_s *task)
00070 {
00071 list_add(&(task->task_list), &tasks_list);
00072 }
00073
00074 void sched_add_task(struct task_s *task)
00075 {
00076 u32 flags;
00077
00078 spin_lock_irqsave(&running_list_lock, flags);
00079 sched_add_task_lock(task);
00080 spin_unlock_irqrestore(&running_list_lock, flags);
00081 }
00082
00083 void sched_task_can_resched(int can_resched)
00084 {
00085 atomic_write(¤t_task->t_can_resched, can_resched);
00086 }
00087
00088
00089 void reschedule_task(struct task_s *task)
00090 {
00091 list_del(&(task->running_task_list));
00092 list_add_tail(&(task->running_task_list), &running_tasks_list);
00093 }
00094
00095 void schedule_task(void)
00096 {
00097 struct task_s *prev, *next;
00098 struct list_head *head;
00099 u16 prev_tss_entry;
00100 u16 next_tss_entry;
00101 u32 flags;
00102 int prev_pid;
00103
00104 spin_lock_irqsave(&running_list_lock, flags);
00105
00106 prev = current_task;
00107
00108 if ((prev->t_state & TASK_STAT_KILLED) || (!(prev->t_state & TASK_STAT_RUNNING))) {
00109 if (prev->t_pid) {
00110 list_del(&prev->running_task_list);
00111 }
00112 }
00113
00114 if (list_empty(&running_tasks_list)) {
00115 next = idle_task;
00116 } else {
00117 head = running_tasks_list.next;
00118 next = list_entry(head, struct task_s, running_task_list);
00119 }
00120
00121 next->t_state &= ~TASK_STAT_STARTING;
00122 prev_tss_entry = prev->tss_wrap->tss_entry;
00123 next_tss_entry = next->tss_wrap->tss_entry;
00124 prev_pid = prev->t_pid;
00125
00126 prev->t_resched = 0;
00127 prev->t_count = INIT_COUNT;
00128
00129 if (prev->t_state & TASK_STAT_KILLED) {
00130 put_free_pages((u32)prev, 1);
00131 }
00132
00133 if (next != idle_task)
00134 reschedule_task(next);
00135
00136 if (prev_pid != next->t_pid)
00137 task_switch(next_tss_entry);
00138
00139 spin_unlock_irqrestore(&running_list_lock, flags);
00140 }
00141
00142 int __init scheduler_init(void)
00143 {
00144 INIT_LIST_HEAD(&running_tasks_list);
00145 INIT_LIST_HEAD(&tasks_list);
00146
00147 spin_lock_init(&running_list_lock);
00148 spin_lock_init(&tasks_list_lock);
00149
00150 return 0;
00151 }
00152
00153 void scheduler_print_tasks(void)
00154 {
00155 struct list_head *tmp;
00156 struct task_s *task;
00157 u32 flags;
00158
00159 printf("\n");
00160
00161 spin_lock_irqsave(&running_list_lock, flags);
00162
00163 list_for_each(tmp, &tasks_list) {
00164 task = list_entry(tmp, struct task_s, task_list);
00165
00166 printf("%s [pid %d]\n", task->name, task->t_pid);
00167 }
00168
00169 spin_unlock_irqrestore(&running_list_lock, flags);
00170 }