00001
00002
00011
00012
00013
00014
00015
00016
00017 #include <l4/sys/syscalls.h>
00018 #include <l4/thread/thread.h>
00019 #include <l4/semaphore/semaphore.h>
00020
00021 #include <l4/dde_linux/dde.h>
00022
00023
00024 #include <linux/sched.h>
00025
00026
00027 #include "internal.h"
00028 #include "__config.h"
00029 #include "fastcall.h"
00030
00044 static inline int try_to_wake_up(struct task_struct *p, int synchronous)
00045 {
00046 p->state = TASK_RUNNING;
00047
00048
00049
00050 if (current != p)
00051 l4semaphore_up(&p->dde_sem);
00052
00053 return 1;
00054 }
00055
/** Wake up a (possibly sleeping) task.
 *
 * \param p  task to wake
 * \return   1 — this emulation always reports the wakeup as successful
 */
inline int FASTCALL(wake_up_process(struct task_struct *p))
{
	return try_to_wake_up(p, 0);
}
00062
/* Timer callback used by schedule_timeout(): @__data smuggles the
 * sleeping task's task_struct pointer; wake that task when the
 * timer expires. */
static void process_timeout(unsigned long __data)
{
	wake_up_process((struct task_struct *) __data);
}
00070
/** Sleep until a timeout elapses or the caller is woken earlier.
 *
 * \param timeout  sleep duration in jiffies, or MAX_SCHEDULE_TIMEOUT
 *                 to sleep without a time limit
 * \return remaining jiffies (0 if the full timeout elapsed);
 *         for MAX_SCHEDULE_TIMEOUT the unchanged value is returned
 */
signed long FASTCALL(schedule_timeout(signed long timeout))
{
	struct timer_list timer;
	unsigned long expire;

	switch (timeout)
	{
	case MAX_SCHEDULE_TIMEOUT:
		/* no time limit — just block until woken */
		schedule();
		goto out;
	default:
		/* negative (other than MAX_SCHEDULE_TIMEOUT) is a caller bug:
		 * complain, stay runnable, and return immediately */
		if (timeout < 0)
		{
			LOG_Error("schedule_timeout: wrong timeout "
			          "value %lx", timeout);
			current->state = TASK_RUNNING;
			goto out;
		}
	}

	expire = timeout + jiffies;

	/* one-shot timer that wakes us (via process_timeout) at 'expire' */
	init_timer(&timer);
	timer.expires = expire;
	timer.data = (unsigned long) current;
	timer.function = process_timeout;

	add_timer(&timer);
	schedule();
	/* _sync: make sure the handler is not still running before the
	 * on-stack timer_list goes out of scope */
	del_timer_sync(&timer);

	/* negative here means we slept past the deadline */
	timeout = expire - jiffies;

out:
	return timeout < 0 ? 0 : timeout;
}
00123
00126 void schedule(void)
00127 {
00128
00129 switch (current->state)
00130 {
00131 case TASK_RUNNING:
00132
00133 #if SCHED_YIELD_OPT
00134 l4thread_usleep(SCHED_YIELD_TO);
00135 #else
00136 l4_yield();
00137 #endif
00138 break;
00139
00140 case TASK_UNINTERRUPTIBLE:
00141 case TASK_INTERRUPTIBLE:
00142
00143 l4semaphore_down(¤t->dde_sem);
00144 break;
00145
00146 default:
00147 Panic("current->state unknown (%ld)\n", current->state);
00148 }
00149 }
00150
/* Core wakeup walk, caller must hold the queue lock.
 *
 * Walks q's task list and wakes every waiter whose state matches
 * @mode, stopping after @nr_exclusive exclusive waiters have been
 * woken.  @sync is passed through to try_to_wake_up() (ignored there
 * in this emulation). */
static inline void __wake_up_common(wait_queue_head_t * q, unsigned int mode,
                                    int nr_exclusive, const int sync)
{
	struct list_head *tmp, *head;
	struct task_struct *p;

	CHECK_MAGIC_WQHEAD(q);
	head = &q->task_list;
	WQ_CHECK_LIST_HEAD(head);
	tmp = head->next;
	while (tmp != head)
	{
		unsigned int state;
		wait_queue_t *curr = list_entry(tmp, wait_queue_t, task_list);

		/* advance before waking — the woken task may remove itself */
		tmp = tmp->next;
		CHECK_MAGIC(curr->__magic);
		p = curr->task;
		state = p->state;
		if (state & mode)
		{
			WQ_NOTE_WAKER(curr);
			/* NOTE(review): stock 2.4 tests
			 * (curr->flags & WQ_FLAG_EXCLUSIVE) here; the logical
			 * 'curr->flags &&' is equivalent only if no other flag
			 * bits exist — verify against the wait.h in use */
			if (try_to_wake_up(p, sync) && curr->flags && !--nr_exclusive)
				break;
		}
	}
}
00179
00182 void FASTCALL(__wake_up(wait_queue_head_t * q, unsigned int mode, int nr))
00183 {
00184 if (q)
00185 {
00186 unsigned long flags;
00187 wq_read_lock_irqsave(&q->lock, flags);
00188 __wake_up_common(q, mode, nr, 0);
00189 wq_read_unlock_irqrestore(&q->lock, flags);
00190 }
00191 }
00192
00195 void FASTCALL(__wake_up_sync(wait_queue_head_t * q, unsigned int mode, int nr))
00196 {
00197 if (q)
00198 {
00199 unsigned long flags;
00200 wq_read_lock_irqsave(&q->lock, flags);
00201 __wake_up_common(q, mode, nr, 1);
00202 wq_read_unlock_irqrestore(&q->lock, flags);
00203 }
00204 }
00205
/* Shared locals for the sleep_on() family: a wait-queue entry bound
 * to the current task, plus saved irq flags. */
#define SLEEP_ON_VAR \
	unsigned long flags; \
	wait_queue_t wait; \
	init_waitqueue_entry(&wait, current);

/* Enqueue current on wait queue q under the queue lock (irqsave). */
#define SLEEP_ON_HEAD \
	wq_write_lock_irqsave(&q->lock,flags); \
	__add_wait_queue(q, &wait); \
	wq_write_unlock(&q->lock);

/* Dequeue current from q and restore the saved interrupt state. */
#define SLEEP_ON_TAIL \
	wq_write_lock_irq(&q->lock); \
	__remove_wait_queue(q, &wait); \
	wq_write_unlock_irqrestore(&q->lock,flags);
00220
00223 void FASTCALL(interruptible_sleep_on(wait_queue_head_t * q))
00224 {
00225 SLEEP_ON_VAR current->state = TASK_INTERRUPTIBLE;
00226
00227 SLEEP_ON_HEAD schedule();
00228 SLEEP_ON_TAIL
00229 }
00230
00233 long FASTCALL(interruptible_sleep_on_timeout(wait_queue_head_t * q, long timeout))
00234 {
00235 SLEEP_ON_VAR current->state = TASK_INTERRUPTIBLE;
00236
00237 SLEEP_ON_HEAD timeout = schedule_timeout(timeout);
00238 SLEEP_ON_TAIL return timeout;
00239 }
00240
00243 void FASTCALL(sleep_on(wait_queue_head_t * q))
00244 {
00245 SLEEP_ON_VAR current->state = TASK_UNINTERRUPTIBLE;
00246
00247 SLEEP_ON_HEAD schedule();
00248 SLEEP_ON_TAIL
00249 }
00250
00253 long FASTCALL(sleep_on_timeout(wait_queue_head_t * q, long timeout))
00254 {
00255 SLEEP_ON_VAR current->state = TASK_UNINTERRUPTIBLE;
00256
00257 SLEEP_ON_HEAD timeout = schedule_timeout(timeout);
00258 SLEEP_ON_TAIL return timeout;
00259 }
00260
00264 void daemonize(void) {
00265 LOGd(DEBUG_MSG, "dde: dummy daemonize() call");
00266 }
00267
00270 void yield(void)
00271 {
00272 set_current_state(TASK_RUNNING);
00273
00274 schedule();
00275 }
00276
00279 void __cond_resched(void)
00280 {
00281 set_current_state(TASK_RUNNING);
00282 schedule();
00283 }
00284