#include "local.h"

#include <linux/sched.h>

DEFINE_RWLOCK(tasklist_lock);

asmlinkage void preempt_schedule(void)
{
	WARN_UNIMPL;
}


/* Our version of scheduler invocation.
 *
 * Scheduling is performed by Fiasco, so we don't care about it as long as
 * a thread is running. If a task becomes TASK_INTERRUPTIBLE or
 * TASK_UNINTERRUPTIBLE, we make sure that the task does not get
 * scheduled by locking the task's sleep lock.
 */
asmlinkage void schedule(void)
{
	dde26_thread_data *t = lxtask_to_ddethread(current);

	switch (current->state) {
	case TASK_RUNNING:
		ddekit_thread_schedule();
		break;
	case TASK_INTERRUPTIBLE:
	case TASK_UNINTERRUPTIBLE:
		/* Block on the task's sleep semaphore until someone wakes
		 * us via try_to_wake_up(). */
		ddekit_sem_down(SLEEP_LOCK(t));
		break;
	default:
		panic("current->state = %d --- unknown state\n", current->state);
	}
}


/** Yield the current processor to other threads.
 *
 * This is a shortcut for kernel-space yielding - it marks the
 * thread runnable and yields via ddekit_yield().
 */
void __sched yield(void)
{
	set_current_state(TASK_RUNNING);
	ddekit_yield();
}


/**
 * try_to_wake_up - wake up a thread
 * @p:     the to-be-woken-up thread
 * @state: the mask of task states that can be woken (ignored here)
 * @sync:  do a synchronous wakeup?
 */
int try_to_wake_up(struct task_struct *p, unsigned int state, int sync)
{
	dde26_thread_data *t;

	Assert(p);
	t = lxtask_to_ddethread(p);
	Assert(t);
	Assert(SLEEP_LOCK(t));

	p->state = TASK_RUNNING;
	/* Release the sleep lock, unblocking a sleeper in schedule(). */
	ddekit_sem_up(SLEEP_LOCK(t));

	return 0;
}


static void process_timeout(unsigned long data)
{
	wake_up_process((struct task_struct *)data);
}


signed long __sched schedule_timeout(signed long timeout)
{
	struct timer_list timer;
	unsigned long expire = timeout + jiffies;

	setup_timer(&timer, process_timeout, (unsigned long)current);
	timer.expires = expire;

	switch (timeout) {
	/*
	 * A @timeout value of %MAX_SCHEDULE_TIMEOUT schedules the CPU
	 * away without a bound on the timeout. In this case the return
	 * value will be %MAX_SCHEDULE_TIMEOUT.
	 */
	case MAX_SCHEDULE_TIMEOUT:
		schedule();
		/* Unbounded sleep - per the contract above, hand the full
		 * value back to the caller. The timer was never added, so
		 * there is nothing to delete. */
		return MAX_SCHEDULE_TIMEOUT;
	default:
		add_timer(&timer);
		schedule();
		del_timer(&timer);
		break;
	}

	timeout = expire - jiffies;

	return timeout < 0 ? 0 : timeout;
}


signed long __sched schedule_timeout_interruptible(signed long timeout)
{
	__set_current_state(TASK_INTERRUPTIBLE);
	return schedule_timeout(timeout);
}


signed long __sched schedule_timeout_uninterruptible(signed long timeout)
{
	__set_current_state(TASK_UNINTERRUPTIBLE);
	return schedule_timeout(timeout);
}
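
/* Illustrative usage sketch (compiled out, not part of this file's API):
 * the sleep/wake-up protocol the functions above implement. A sleeper
 * publishes TASK_INTERRUPTIBLE and calls schedule(), which blocks on its
 * DDEKit sleep semaphore; a waker calls wake_up_process(), which ends up
 * in try_to_wake_up() and releases that semaphore. The names
 * example_sleeper, example_waker and wait_flag are hypothetical helpers
 * for this sketch only.
 */
#if 0
static int wait_flag;

static void example_sleeper(void)
{
	/* Publish the sleep state before testing the condition, so a
	 * wake-up arriving in between is not lost: the semaphore up()
	 * issued by the waker is counted even if we have not blocked yet. */
	set_current_state(TASK_INTERRUPTIBLE);
	while (!wait_flag) {
		schedule();	/* blocks on SLEEP_LOCK(t) */
		set_current_state(TASK_INTERRUPTIBLE);
	}
	set_current_state(TASK_RUNNING);
}

static void example_waker(struct task_struct *sleeper)
{
	wait_flag = 1;
	wake_up_process(sleeper);	/* -> try_to_wake_up() */
}
#endif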


/** Tasks may be forced to run only on a certain number of CPUs. Since
 * we only emulate an SMP environment for the sake of having multiple
 * threads, we do not need to implement this.
 */
int set_cpus_allowed_ptr(struct task_struct *p, const struct cpumask *new_mask)
{
	return 0;
}

void set_user_nice(struct task_struct *p, long nice)
{
	/* Deliberately silent - nice levels have no effect here, since
	 * Fiasco performs the actual scheduling. */
}

void __sched io_schedule(void)
{
	WARN_UNIMPL;
}

long __sched io_schedule_timeout(long timeout)
{
	WARN_UNIMPL;
	return -1;
}

int sched_setscheduler_nocheck(struct task_struct *t, int flags,
                               struct sched_param *p)
{
	WARN_UNIMPL;
	return -1;
}

void ignore_signals(struct task_struct *t) { }
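
/* Illustrative caller (hypothetical, compiled out): in this emulated SMP
 * environment, pinning a task to a CPU set is accepted but has no effect,
 * since Fiasco performs the real scheduling. The helper below only
 * demonstrates that the stub reports success.
 */
#if 0
static void example_pin_to_cpu0(struct task_struct *task)
{
	int err = set_cpus_allowed_ptr(task, cpumask_of(0));

	/* Always 0 here - the affinity request is silently ignored. */
	BUG_ON(err != 0);
}
#endif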