Main Page | Modules | Namespace List | Class Hierarchy | Class List | Directories | File List | Namespace Members | Class Members | File Members

thread_i.h

Go to the documentation of this file.
00001 // AUTOMATICALLY GENERATED -- DO NOT EDIT!         -*- c++ -*-
00002 
00003 #ifndef thread_i_h
00004 #define thread_i_h
00005 
00006 #include <cassert>
00007 #include <cstdlib>              // panic()
00008 #include "atomic.h"
00009 #include "entry_frame.h"
00010 #include "globals.h"
00011 #include "irq_alloc.h"
00012 #include "kdb_ke.h"
00013 #include "logdefs.h"
00014 #include "map_util.h"
00015 #include "sched_context.h"
00016 #include "space_index_util.h"
00017 #include "std_macros.h"
00018 #include "thread_state.h"
00019 
00020 #include "config.h"
00021 #include "cpu.h"
00022 #include "cpu_lock.h"
00023 #include "mem_layout.h"
00024 #include "logdefs.h"
00025 #include "paging.h"
00026 #include "processor.h"          // for cli/sti
00027 #include "regdefs.h"
00028 #include "std_macros.h"
00029 #include "thread.h"
00030 #include "timer.h"
00031 #include "trap_state.h"
00032 #include "vmem_alloc.h"
00033 #include "globalconfig.h"
00034 #include "idt.h"
00035 #include "io.h"
00036 #include "kernel_console.h"
00037 #include "simpleio.h"
00038 #include "terminate.h"
00039 #include "utcb_init.h"
00040 #include "watchdog.h"
00041 
00042 #include <cstdio>
00043 
00044 #include "l4_types.h"
00045 
00046 #include "config.h"
00047 #include "irq.h"
00048 #include "irq_alloc.h"
00049 #include "logdefs.h"
00050 #include "map_util.h"
00051 #include "space.h"
00052 #include "space_index.h"
00053 #include "space_index_util.h"
00054 #include "std_macros.h"
00055 #include "thread.h"
00056 #include "warn.h"
00057 
00058 #include "l4_types.h"
00059 #include "config.h"
00060 #include "space_index.h"
00061 
00062 #include <cstdlib>              // panic()
00063 
00064 #include "l4_types.h"
00065 
00066 #include "config.h"
00067 #include "cpu_lock.h"
00068 #include "dirq.h"
00069 #include "ipc_timeout.h"
00070 #include "lock_guard.h"
00071 #include "logdefs.h"
00072 #include "map_util.h"
00073 #include "processor.h"
00074 #include "kdb_ke.h"
00075 
00076 // The ``long IPC'' mechanism.
00077 
00078 // IDEAS for enhancing this implementation:
00079 
00080 // Currently, we flush the address-space window used for copying
00081 // memory during IPC every time we switch threads.  This is costly
00082 // because it involves a TLB flush.  We could maybe avoid a lot of
00083 // flushes by doing them lazily -- that is, we only do them when the
00084 // window is used by someone else. -- I haven't really thought about
00085 // whether this really makes sense with a fully interruptible IPC path
00086 // that is possibly executed by more than one thread at the same time.
00087 
00088 // We should use only the first one of the two address-space windows
00089 // if an indirect string actually refers to a buffer that is already
00090 // mapped in the first window.
00091 
00092 // The address-space-window setup function should actually fill the
00093 // corresponding page-directory entries safely (using cli) instead of
00094 // leaving that work to the page-fault handler.  This is safe because
00095 // those page-directory entries are flushed during the next thread
00096 // switch.
00097 
00098 // XXX: I'm not sure whether we return the correct error codes for
00099 // page fault timeouts in any of the sender's and receiver's address
00100 // spaces.
00101 
00102 #include "l4_types.h"
00103 
00104 #include "config.h"
00105 #include "cpu_lock.h"
00106 #include "std_macros.h"
00107 
00108 #include "globalconfig.h"
00109 #include "kmem.h"
00110 #include "long_msg.h"
00111 #include "std_macros.h"
00112 
00113 #include "cpu_lock.h"
00114 
00115 #include <cstdio>
00116 
00117 #include "config.h"
00118 #include "cpu.h"
00119 #include "kmem.h"
00120 #include "processor.h"
00121 #include "thread.h"
00122 
00123 #include <alloca.h>
00124 #include <cstring>
00125 #include "config.h"
00126 #include "idt.h"
00127 #include "jdb_tbuf.h"
00128 #include "types.h"
00129 
00130 #include <cstdio>
00131 #include "simpleio.h"
00132 #include "cpu.h"
00133 #include "kernel_console.h"
00134 #include "reset.h"
00135 #include "tss.h"
00136 #include "watchdog.h"
00137 
00138 //
00139 // IMPLEMENTATION of inline functions follows
00140 //
00141 
00142 
00143 
00144 
00145 inline void
00146 Thread::user_invoke_generic()
00147 {
00148   assert (current()->state() & Thread_ready);
00149 
00150   // release CPU lock explicitly, because
00151   // * the context that switched to us holds the CPU lock
00152   // * we run on a newly-created stack without a CPU lock guard
00153   cpu_lock.clear();
00154 
00155   current_thread()->rcv_startup_msg();          // set sp and ip
00156 }
00157 
00158 
00159 
00160 inline void
00161 Thread::enqueue_thread0_other_task()
00162 {
00163   // other task -> enqueue in front of this task
00164   present_enqueue
00165     (lookup_first_thread
00166      (Thread::lookup (thread_lock()->lock_owner())
00167       ->space_index())->present_prev);
00168   // that's safe because thread 0 of a task is always present
00169 }
00170 
00171 
00172 
// Whether the faulting thread may use kernel-debugger services.
// The IOPL-based check is compiled out below, so every thread is
// currently allowed (always returns 1).
inline int
Thread::is_privileged_for_debug(Trap_state * /*ts*/)
{
#if 0
  return ((ts->eflags & EFLAGS_IOPL) == EFLAGS_IOPL_U);
#else
  return 1;
#endif
}
00182 
00183 
00184 
// Decode the instruction at `eip' to determine the IO port and access
// size of a faulting IO operation.  This build contains no decoder,
// so every IO fault is reported as undecodable (returns false).
inline bool
Thread::get_ioport(Address /*eip*/, Trap_state * /*ts*/,
                   unsigned * /*port*/, unsigned * /*size*/)
{
  return false;
}
00191 
00192 
00193 
// Enqueue a non-zero thread of another task into the present list.
// Reuses the thread-0 variant, which inserts in front of that task's
// thread 0 (always present).
inline void
Thread::enqueue_thread_other_task()
{
  enqueue_thread0_other_task();
}
00199 
00200 
00201 
// Initialize the UTCB fields used by local IPC (LIPC).  LIPC is not
// enabled in this configuration, so there is nothing to set up.
inline void
Thread::setup_lipc_utcb()
{}
00205 
00206 
00207 
// Prepare the thread for exception IPC.  Exception IPC needs no
// per-thread setup in this configuration, hence the empty body.
inline void
Thread::setup_exception_ipc()
{}
00211 
00212 
00217 inline void
00218 Thread::arch_init()
00219 {
00220   // clear out user regs that can be returned from the thread_ex_regs
00221   // system call to prevent covert channel
00222   Entry_frame *r = regs();
00223   r->sp(0);
00224   r->ip(0);
00225   if(Config::enable_io_protection)
00226     r->flags(EFLAGS_IOPL_K | EFLAGS_IF | 2);    // ei
00227   else
00228     r->flags(EFLAGS_IOPL_U | EFLAGS_IF | 2);    // XXX iopl=kernel
00229   r->cs(Gdt::gdt_code_user | Gdt::Selector_user);
00230   r->ss(Gdt::gdt_data_user | Gdt::Selector_user);
00231 
00232 #ifdef CONFIG_HANDLE_SEGMENTS
00233   _fs = Gdt::gdt_data_user | Gdt::Selector_user;
00234   _gs = Utcb_init::gs_value();
00235 #endif
00236 
00237   // make sure the thread's kernel stack is mapped in its address space
00238   _task->kmem_update(this);
00239 }
00240 
00241 
00242 
00243 inline int
00244 Thread::check_trap13_kernel(Trap_state *ts, bool from_user)
00245 {
00246   if (EXPECT_FALSE (ts->trapno == 13 && (ts->err & 3) == 0))
00247     {
00248       // First check if user loaded a segment register with 0 because the
00249       // resulting exception #13 can be raised from user _and_ kernel. If
00250       // the user tried to load another segment selector, the thread gets
00251       // killed.
00252       // XXX Should we emulate this too? Michael Hohmuth: Yes, we should.
00253       if (EXPECT_FALSE (!(ts->ds & 0xffff)))
00254         {
00255           Cpu::set_ds (Gdt::data_segment (from_user));
00256           return 0;
00257         }
00258       if (EXPECT_FALSE (!(ts->es & 0xffff)))
00259         {
00260           Cpu::set_es (Gdt::data_segment (from_user));
00261           return 0;
00262         }
00263       if (EXPECT_FALSE (!(ts->fs & 0xffff)))
00264         {
00265           ts->fs = Gdt::gdt_data_user | Gdt::Selector_user;
00266           return 0;
00267         }
00268       if (EXPECT_FALSE (!(ts->gs & 0xffff)))
00269         {
00270           ts->gs = Utcb_init::gs_value();
00271           return 0;
00272         }
00273       if (EXPECT_FALSE (ts->ds & 0xfff8) == Gdt::gdt_code_user)
00274         {
00275           WARN("%x.%x eip=%08lx: code selector ds=%04lx",
00276                d_taskno(), d_threadno(), ts->eip, ts->ds & 0xffff);
00277           Cpu::set_ds (Gdt::data_segment (from_user));
00278           return 0;
00279         }
00280       if (EXPECT_FALSE (ts->ds & 0xfff8) == Gdt::gdt_code_user)
00281         {
00282           WARN("%x.%x eip=%08lx: code selector es=%04lx",
00283                d_taskno(), d_threadno(), ts->eip, ts->es & 0xffff);
00284           Cpu::set_es (Gdt::data_segment (from_user));
00285           return 0;
00286         }
00287       if (EXPECT_FALSE (ts->ds & 0xfff8) == Gdt::gdt_code_user)
00288         {
00289           WARN("%x.%x eip=%08lx: code selector fs=%04lx",
00290                d_taskno(), d_threadno(), ts->eip, ts->fs & 0xffff);
00291           ts->fs = Gdt::gdt_data_user | Gdt::Selector_user;
00292           return 0;
00293         }
00294       if (EXPECT_FALSE (ts->ds & 0xfff8) == Gdt::gdt_code_user)
00295         {
00296           WARN("%x.%x eip=%08lx: code selector gs=%04lx",
00297                d_taskno(), d_threadno(), ts->eip, ts->gs & 0xffff);
00298           ts->gs = Utcb_init::gs_value();
00299           return 0;
00300         }
00301     }
00302 
00303   return 1;
00304 }
00305 
00306 
00307 
00308 inline void
00309 Thread::check_f00f_bug(Trap_state *ts)
00310 {
00311   // If we page fault on the IDT, it must be because of the F00F bug.
00312   // Figure out exception slot and raise the corresponding exception.
00313   // XXX: Should we also modify the error code?
00314   if (ts->trapno == 14          // page fault?
00315       && ts->cr2 >= Idt::idt()
00316       && ts->cr2 <  Idt::idt() + Idt::_idt_max * 8)
00317     ts->trapno = (ts->cr2 - Idt::idt()) / 8;
00318 }
00319 
00320 
00321 
// Handle the two IO-protection fault flavors:
//  (1) a kernel-mode page fault on the delimiter byte just past the
//      IO bitmap, repaired by mapping the shared delimiter page; and
//  (2) a genuine IO-port access fault (#GP, or #PF on the unmapped
//      bitmap), converted into an IO-flexpage fault IPC to the pager.
// Returns true iff the fault was fully handled here.
inline bool
Thread::handle_io_page_fault(Trap_state *ts, Address eip, bool from_user)
{
  // check for page fault at the byte following the IO bitmap
  if (Config::enable_io_protection
      && ts->trapno == 14           // page fault?
      && (ts->err & 4) == 0         // in supervisor mode?
      && (eip < Kmem::mem_user_max) // delimiter byte accessed?
      && (ts->cr2 == Mem_layout::Io_bitmap + L4_fpage::Io_port_max / 8))
    {
      // page fault in the first byte following the IO bitmap
      // map in the cpu_page read_only at the place
      Space::Status result =
        space()->v_insert (space()->virt_to_phys_s0
                             ((void*)Kmem::io_bitmap_delimiter_page()),
                           Mem_layout::Io_bitmap + L4_fpage::Io_port_max / 8,
                           Config::PAGE_SIZE,
                           Pd_entry::global());

      switch (result)
        {
        case Space::Insert_ok:
          return true;
        case Space::Insert_err_nomem:
          // kernel failure, translate this into a general protection
          // violation and hope that somebody handles it
          ts->trapno = 13;
          ts->err    =  0;
          return false;
        default:
          // no other error code possible
          assert (false);
        }
    }

  // Check for IO page faults. If we got exception #14, the IO bitmap page is
  // not available. If we got exception #13, the IO bitmap is available but
  // the according bit is set. In both cases we have to dispatch the code at
  // the faulting eip to deterine the IO port and send an IO flexpage to our
  // pager. If it was a page fault, check the faulting address to prevent
  // touching userland.
  // (`&&' binds tighter than `||' here, which matches the intent above:
  //  trap 13 unconditionally, trap 14 only on a bitmap-page fault.)
  if (Config::enable_io_protection && eip < Kmem::mem_user_max &&
      (ts->trapno == 13 ||
       ts->trapno == 14 && Kmem::is_io_bitmap_page_fault (ts->cr2)))
    {
      unsigned port, size;
      if (get_ioport (eip, ts, &port, &size))
        {
          // Encode the faulting port range as an IO flexpage.
          Mword io_page = L4_fpage::io (port, size, 0).raw();

          // set User mode flag to get correct EIP in handle_page_fault_pager
          // pretend a write page fault
          static const unsigned io_error_code = PF_ERR_WRITE | PF_ERR_USERMODE;

          CNT_IO_FAULT;

          if (EXPECT_FALSE (log_page_fault()))
            page_fault_log (io_page, io_error_code, eip);

          if (Config::monitor_page_faults)
            {
              // Detect the same IO fault occurring twice in a row --
              // presumably the pager failed to resolve it; drop into
              // the kernel debugger.
              if (_last_pf_address    == io_page &&
                  _last_pf_error_code == io_error_code)
                {
                  if (!log_page_fault())
                    printf ("*IO[%x,%x,%lx]\n", port, size, eip);
                  else
                    putchar ('\n');

                  kdb_ke ("PF happened twice");
                }

              _last_pf_address    = io_page;
              _last_pf_error_code = io_error_code;

              // (See also corresponding code in
              //  Thread::handle_page_fault() and Thread::handle_slow_trap.)
            }

          // treat it as a page fault in the region above 0xf0000000,

          // We could also reset the Thread_cancel at slowtraps entry but it
          // could be harmful for debugging (see also comment at slowtraps:).
          //
          // This must be done while interrupts are off to prevent that an
          // other thread sets the flag again.
          state_del (Thread_cancel);

          // Forward the synthesized IO fault to the pager.
          Ipc_err ipc_code = handle_page_fault_pager (io_page, io_error_code);

          if (!ipc_code.has_error())
            {
              // check for cli/sti
              Unsigned8 instr = space()->peek ((Unsigned8*) eip, from_user);

              if (space()->is_privileged() &&
                  (instr == 0xfa /*cli*/ || instr == 0xfb /*sti*/))
                {
                  // lazily link in IOPL if necessary
                  if ((ts->eflags & EFLAGS_IOPL) != EFLAGS_IOPL_U)
                    ts->eflags |= EFLAGS_IOPL_U;
                }

              return true;
            }
          // fallthrough if unsuccessful (maybe to user installed handler)
        }
    }
  return false;
}
00432 
00433 
00434 
// Detect a SYSENTER attempt (opcode 0f 34, read little-endian as
// 0x340f) on a CPU or setup that cannot handle it, which surfaces as
// #UD (trap 6) or #GP (trap 13) with a zero error code.  Returns
// false if the thread is to be killed for this, true if the trap is
// unrelated to SYSENTER.
inline bool
Thread::handle_sysenter_trap(Trap_state *ts, Address eip, bool from_user)
{
  // NOTE: evaluation order matters -- peek() at eip must only run
  // after the bounds check on eip (short-circuit &&).
  if (EXPECT_FALSE
      ((ts->trapno == 6 || ts->trapno == 13)
       && (ts->err & 0xffff) == 0
       && (eip < Kmem::mem_user_max - 2)
       && (space()->peek ((Unsigned16*) eip, from_user)) == 0x340f))
    {
      // somebody tried to do sysenter on a machine without support for it
      WARN ("%x.%x (tcb=%p) killed:\n"
            "\033[1;31mSYSENTER not supported on this machine\033[0m",
            d_taskno(), d_threadno(), this);

      if (Cpu::have_sysenter())
        // GP exception if sysenter is not correctly set up..
        WARN ("SYSENTER_CS_MSR: %llx", Cpu::rdmsr (SYSENTER_CS_MSR));
      else
        // We get UD exception on processors without SYSENTER/SYSEXIT.
        WARN ("SYSENTER/EXIT not available.");

      return false;
    }

  return true;
}
00461 
00462 
00463 
// Whether this thread may perform privileged trap handling; delegates
// to its address space's privilege flag.  The Trap_state argument is
// unused in this implementation.
inline bool
Thread::trap_is_privileged(Trap_state *)
{ return space()->is_privileged(); }
00467 
00468 
00469 
// Emulate a user "wrmsr" instruction: write the 64-bit value edx:eax
// into the MSR selected by ecx, all taken from the trapped register
// frame.
inline void
Thread::do_wrmsr_in_kernel(Trap_state *ts)
{
  // do "wrmsr (msr[ecx], edx:eax)" in kernel
  Cpu::wrmsr (ts->eax, ts->edx, ts->ecx);
}
00476 
00477 
00478 
00479 inline void
00480 Thread::do_rdmsr_in_kernel(Trap_state *ts)
00481 {
00482   // do "rdmsr (msr[ecx], edx:eax)" in kernel
00483   Unsigned64 msr = Cpu::rdmsr (ts->ecx);
00484   ts->eax = (Unsigned32) msr;
00485   ts->edx = (Unsigned32) (msr >> 32);
00486 }
00487 
00488 
00489 
00490 inline int
00491 Thread::handle_not_nested_trap(Trap_state *ts)
00492 {
00493   // no kernel debugger present
00494   printf (" %x.%02x EIP=%08lx Trap=%02lx [Ret/Esc]\n", 
00495           d_taskno(), d_threadno(), ts->eip, ts->trapno); 
00496 
00497   int r;
00498   // cannot use normal getchar because it may block with hlt and irq's
00499   // are off here
00500   while ((r=Kconsole::console()->getchar (false)) == -1)
00501     Proc::pause();
00502 
00503   if (r == '\033')
00504     terminate (1);
00505 
00506   return 0;
00507 }
00508 
00509 
00510 
00511 inline bool
00512 Thread::handle_lldt(Trap_state *)
00513 {
00514   return 0;
00515 }
00516 
00517 
00518 /*
00519  * Return current timesharing parameters
00520  */
00521 
00522 inline void
00523 Thread::get_timesharing_param(L4_sched_param *param,
00524                                L4_uid *preempter,
00525                                L4_uid *ipc_partner)
00526 {
00527   Mword s = state();
00528 
00529   *preempter = _ext_preempter ? _ext_preempter->id() : L4_uid::Invalid;
00530 
00531   *ipc_partner = s & (Thread_polling | Thread_receiving) && partner() ?
00532                       partner()->id() : L4_uid::Invalid;
00533 
00534   if (s & Thread_dead)
00535     s = 0xf;
00536   else if (s & Thread_polling)
00537     s = (s & Thread_ready) ? 8 : 0xd;
00538   else if (s & Thread_receiving)
00539     s = (s & Thread_ready) ? 8 : 0xc;
00540   else
00541     s = 0;
00542 
00543   param->prio (sched_context()->prio());
00544   param->time (sched_context()->quantum());
00545   param->thread_state (s);
00546 }
00547 
00548 
00549 /*
00550  * Set scheduling parameters for timeslice with id 'id'
00551  */
00552 
// Set priority and/or quantum of the timeslice with id `id'.
// Returns 0 on success, ~0U if the caller's MCP does not cover the
// requested priority or no timeslice with that id exists.
inline Mword
Thread::set_schedule_param(L4_sched_param param, unsigned short const id)
{
  // The thread-lock owner is the caller of this operation; its MCP
  // bounds the priority it may assign.
  if (EXPECT_FALSE (param.prio() > thread_lock()->lock_owner()->mcp()))
    return ~0U;

  // We need to protect the priority manipulation so that this thread
  // cannot be preempted and ready-enqueued according to a wrong
  // priority and the current timeslice cannot change while we are
  // manipulating it

  Lock_guard <Cpu_lock> guard (&cpu_lock);

  Sched_context *s = sched_context (id);
  if (!s)
    return ~0U;

  if (s->prio() != param.prio())
    {
      // Dequeue from ready-list if we manipulate the current time slice
      if (s == sched())
        ready_dequeue();

      // ready_enqueue happens during thread_lock release
      s->set_prio (param.prio());
    }

  // A quantum of (Unsigned64)-1 means "leave the quantum unchanged".
  Unsigned64 q = param.time();
  if (q != (Unsigned64) -1)
    {
      q = round_quantum (q);
      s->set_quantum (q);
      // Only reset the remaining budget of an inactive timeslice; the
      // currently running one keeps what it has left.
      if (s != sched())
        s->set_left (q);
    }

  return 0;
}
00591 
00592 
00593 /*
00594  * Set preempter unless "invalid"
00595  */
00596 
00597 inline void
00598 Thread::set_preempter(L4_uid const preempter)
00599 {
00600   if (EXPECT_FALSE (preempter.is_invalid()))
00601     return;
00602 
00603   Thread *p = lookup (preempter);
00604 
00605   if (p && p->exists())
00606     _ext_preempter = p;
00607 }
00608 
00609 
00610 /*
00611  * Add a realtime timeslice at the end of the list
00612  */
00613 
00614 inline Mword
00615 Thread::set_realtime_param(L4_sched_param param)
00616 {
00617   if (EXPECT_FALSE (mode() & Periodic || _deadline_timeout.is_set() ||
00618       param.prio() > thread_lock()->lock_owner()->mcp() ||
00619       param.time() == (Unsigned64) -1))
00620     return ~0U;
00621 
00622   Sched_context *s = new Sched_context (this,
00623                                         sched_context()->prev()->id() + 1,
00624                                         param.prio(),
00625                                         round_quantum (param.time()));
00626 
00627   s->enqueue_before (sched_context());
00628 
00629   return 0;
00630 }
00631 
00632 
00633 /*
00634  * Remove all realtime timeslices
00635  */
00636 
00637 inline Mword
00638 Thread::remove_realtime_param()
00639 {
00640   Lock_guard <Cpu_lock> guard (&cpu_lock);
00641 
00642   if (EXPECT_FALSE (mode() & Periodic || _deadline_timeout.is_set()))
00643     return ~0U;
00644 
00645   assert (sched() == sched_context());
00646 
00647   preemption()->set_pending (0);
00648   preemption()->sender_dequeue (preemption()->receiver()->sender_list());
00649 
00650   Sched_context *s, *tmp;
00651 
00652   for (s  = sched_context()->next();
00653        s != sched_context();
00654        tmp = s, s = s->next(), tmp->dequeue(), delete tmp);
00655 
00656   return 0;
00657 }
00658 
00659 
00660 
// Inter-task ex_regs is not supported in this configuration; the stub
// always returns 0.
// NOTE(review): the meaning of the 0 return (reject vs. "nothing
// done") is inferred from the stub form -- confirm against the caller.
inline ALWAYS_INLINE int
Thread::handle_inter_task_ex_regs(Sys_ex_regs_frame *,
                                  L4_uid *, Thread **, Task **, Thread **)
{
  return 0;
}
00667 
00668 
00673 inline Unsigned64
00674 Thread::round_quantum(Unsigned64 quantum)
00675 {
00676   return quantum + Config::scheduler_granularity - 1
00677        - mod32 (quantum + Config::scheduler_granularity - 1,
00678                 Config::scheduler_granularity);
00679 }
00680 
00681 
00689 inline Unsigned64
00690 Thread::snd_timeout(L4_timeout t, Sys_ipc_frame const * regs)
00691 {
00692   // absolute timeout
00693   if (EXPECT_FALSE (regs->has_abs_snd_timeout()))
00694     {
00695       Unsigned64 sc = Timer::system_clock();
00696       Unsigned64 tval = t.snd_microsecs_abs (sc, regs->abs_snd_clock());
00697 
00698       // check if timeout already expired
00699       return tval <= sc ? 0 : tval;
00700     }
00701 
00702   // zero timeout
00703   if (t.snd_man() == 0)
00704     return 0;
00705 
00706   // relative timeout
00707   return t.snd_microsecs_rel (Timer::system_clock());
00708 }
00709 
00710 
00718 inline Unsigned64
00719 Thread::rcv_timeout(L4_timeout t, Sys_ipc_frame const *regs)
00720 {
00721   // absolute timeout
00722   if (EXPECT_FALSE (regs->has_abs_rcv_timeout()))
00723     {
00724       Unsigned64 sc = Timer::system_clock();
00725       Unsigned64 tval = t.rcv_microsecs_abs (sc, regs->abs_rcv_clock());
00726 
00727       // check if timeout already expired
00728       return tval <= sc ? 0 : tval;
00729     }
00730 
00731   // zero timeout
00732   if (t.rcv_man() == 0)
00733     return 0;
00734 
00735   // relative timeout
00736   return t.rcv_microsecs_rel (Timer::system_clock());
00737 }
00738 
00739 
// Prepare this thread for the receive phase of an IPC.
// @param partner the sender to receive from
// @param regs the IPC system-call frame
inline void Thread::prepare_receive(Sender *partner,
                             Sys_ipc_frame *regs)
{
#if 0
  if (partner && partner->state() == Thread_invalid) // partner nonexistent?
    return;                     // just return
  // don't need to signal error here -- it will be detected later
#endif

  setup_receiver (partner, regs);

  // if we don't have a send, get us going here; otherwise, the send
  // operation will set the Thread_ipc_in_progress flag after it has
  // initialized
  if (!regs->has_snd())
    {
      state_add (Thread_ipc_in_progress);
      // The add-then-check-then-delete sequence presumably closes a
      // race with a concurrent cancel; its exact order matters -- do
      // not reorder.  TODO(review): confirm against the cancel path.
      if (state() & Thread_cancel)
        {
          state_del (Thread_ipc_in_progress);
        }
    }
}
00771 
00772 
00773 
00774 inline bool
00775 Thread::invalid_ipc_buffer(void const *a)
00776 {
00777   return Mem_layout::in_kernel(((Address)a & Config::SUPERPAGE_MASK)
00778                                + Config::SUPERPAGE_SIZE - 1);
00779 }
00780 
00781 #endif // thread_i_h

Generated on Mon Sep 26 14:20:12 2005 for Fiasco by  doxygen 1.4.2