Main Page | Modules | Namespace List | Class Hierarchy | Class List | Directories | File List | Namespace Members | Class Members | File Members

thread.h

Go to the documentation of this file.
00001 // AUTOMATICALLY GENERATED -- DO NOT EDIT!         -*- c++ -*-
00002 
00003 #ifndef thread_h
00004 #define thread_h
00005 
00006 #include <setjmp.h>             // typedef jmp_buf
00007 
00008 #include "l4_types.h"
00009 
00010 #include "activation.h"
00011 #include "config.h"
00012 #include "deadline_timeout.h"
00013 #include "mem_layout.h"
00014 #include "preemption.h"
00015 #include "receiver.h"
00016 #include "sender.h"
00017 #include "space.h"              // Space_index
00018 #include "thread_lock.h"
00019 
00020 #include "trap_state.h"
00021 
00022 //
00023 // INTERFACE definition follows 
00024 //
00025 
00026 
00027 class Irq_alloc;
00028 class Return_frame;
00029 class Syscall_frame;
00030 class Task;
00031 
00032 class Idt_entry;
00033 
00034 class Trap_state;
00035 class Sys_ex_regs_frame;
00036 
00037 class Sys_ipc_frame;
00038 
00041 class Thread : public Receiver, public Sender
00042 {
00043   friend class Jdb;
00044   friend class Jdb_bt;
00045   friend class Jdb_tcb;
00046   friend class Jdb_thread_list;
00047   friend class Jdb_list_threads;
00048   friend class Jdb_list_timeouts;
00049   friend class Jdb_tbuf_show;
00050 
00051 public:
00052 
00053   typedef void (Utcb_copy_func)(Thread *sender, Thread *receiver);
00054 
00065   Thread (Task* task, Global_id id,
00066           unsigned short init_prio, unsigned short mcp);
00067 
00068   void sys_ipc();
00069   void sys_fpage_unmap();
00070   void sys_thread_switch();
00071   void sys_thread_schedule();
00072   void sys_task_new();
00073   void sys_id_nearest();
00074   void sys_thread_ex_regs();
00075 
00076   bool handle_page_fault (Address pfa, Mword error, Mword pc);
00077 
00084   Task_num Thread::d_taskno();
00085 
00090   LThread_num Thread::d_threadno();
00091 
00092 private:
00093   Thread(const Thread&);        
00094   void *operator new(size_t);   
00095 
00096   bool handle_sigma0_page_fault (Address pfa);
00097   bool handle_smas_page_fault (Address pfa, Mword error, Ipc_err &ipc_code);
00098 
00102   void kill_small_space();
00103 
00107   Mword small_space();
00108 
00112   void set_small_space (Mword nr);
00113 
00120   static void user_invoke();
00121 
00127   void rcv_startup_msg();
00128 
00129   bool Thread::associate_irq(Irq_alloc *irq);
00130   void Thread::disassociate_irq(Irq_alloc *irq);
00131 
00132 public:
00140   static Thread * Thread::lookup (L4_uid id, Space *s);
00141 
00142   static Mword pagein_tcb_request(Address pc);
00143   Mword is_tcb_mapped() const;
00144 
00145 protected:
00146   // implementation details follow...
00147 
00148   // DATA
00149 
00150   // Preemption IPC sender role
00151   Preemption _preemption;
00152 
00153   // Deadline Timeout
00154   Deadline_timeout _deadline_timeout;
00155 
00156   // Activation IPC sender role
00157   Activation _activation;
00158 
00159   // Another critical TCB cache line:
00160   Task*        _task;
00161   Thread_lock  _thread_lock;
00162 
00163   // More ipc state
00164   Thread *_pager, *_ext_preempter;
00165   Thread *present_next, *present_prev;
00166 //  Irq_alloc *_irq;
00167 
00168   // long ipc state
00169   L4_rcv_desc _target_desc;     // ipc buffer in receiver's address space
00170   unsigned _pagein_status_code;
00171 
00172   Address _vm_window0, _vm_window1; // data windows for the
00173                                 // IPC partner's address space (for long IPC)
00174   jmp_buf *_recover_jmpbuf;     // setjmp buffer for page-fault recovery
00175   L4_timeout _pf_timeout;       // page-fault timeout specified by partner
00176 
00177   // debugging stuff
00178   Address _last_pf_address;
00179   unsigned _last_pf_error_code;
00180 
00181   unsigned _magic;
00182   static const unsigned magic = 0xf001c001;
00183 
00184   // Constructor
00185   Thread (Task* task, L4_uid id);
00186 
00187   // Thread killer
00188   void kill_all();
00189 private:
00190 
00191 protected:
00192   Irq_alloc *_irq;
00193 private:
00194 
00195 private:
00199   static Mword const exception_cs();
00200 
00201 protected:
00202   Idt_entry *_idt;
00203   Unsigned16 _idt_limit;
00204 
00205   static Trap_state::Handler nested_trap_handler FIASCO_FASTCALL;
00206 private:
00207 
00208 private:
00209   // small address space stuff
00210   bool        handle_smas_gp_fault ();
00211   static int  (*int3_handler)(Trap_state*);
00212 private:
00213 
00214 public:
00215   inline bool ipc_short_cut();
00216 
00217 private:
00228   Mword ipc_send_regs (Thread* receiver,
00229                        Sys_ipc_frame const *sender_regs);
00230 
00239   void page_fault_msg (Sys_ipc_frame &r, Address fault_address, 
00240                        Mword fault_ip, Mword err);
00241 
00250   void commit_ipc_success (Sys_ipc_frame *regs, Ipc_err err);
00251 
00260   void commit_ipc_failure (Sys_ipc_frame *regs, Ipc_err err);
00261 
00268   Ipc_err get_ipc_err (Sys_ipc_frame *regs);
00269 
00278   void set_source_local_id(Thread *receiver,  Sys_ipc_frame *dest_regs);
00279 private:
00280 
00281 private:
00288   Address remote_fault_addr (Address pfa);
00289 
00298   Mword update_ipc_window (Address pfa, Mword error_code, Address remote_pfa);
00299 private:
00300 
00301 private:
00302   static void handle_double_fault (void) asm ("thread_handle_double_fault");
00303 
00304 public:
00305   static bool may_enter_jdb;
00306 
00307 public:  
00314   inline void * operator new(size_t, L4_uid id);
00315   
00319   inline void operator delete(void *);
00320   
00328   virtual ~Thread();            // To be called in locked state.
00329   
00334   static inline Thread* lookup(Context* c);
00335   
00336   //
00337   // state requests/manipulation
00338   //
00339   
00343   inline Task * task() const;
00344   
00350   inline Thread_lock * thread_lock();
00351   
00352   inline Preemption * preemption();
00353   
00354   inline void handle_timer_interrupt();
00355   
00362   static inline Thread * lookup(Global_id id);
00363   
00364   void halt();
00365   
00366   static void halt_current();
00367   
00371   static inline Thread * lookup_first_thread(unsigned space);
00372   
00376   inline Space_index space_index() const;
00377   
00381   inline Space_index chief_index() const;
00382   
00391   bool kill_task(Space_index subtask);
00392   
00397   inline unsigned nest() const;
00398   
00399   inline bool has_privileged_iopl();
00400   
00401   inline void unset_utcb_ptr();
00402   
00403   static void privilege_initialization();
00404   
00416   bool initialize(Address ip, Address sp, Thread* pager, Receiver* preempter, Address *o_ip = 0, Address *o_sp = 0, Thread* *o_pager = 0, Receiver* *o_preempter = 0, Address *o_flags = 0, bool no_cancel = 0, bool alien = 0);
00417   
00428   int handle_slow_trap(Trap_state *ts);
00429   
00439   inline bool raise_exception(Trap_state *ts, Address handler);
00440   
00441   inline int snd_exception(Trap_state *) const;
00442   
00443   /*
00444    * Handle FPU trap for this context. Assumes disabled interrupts
00445    */
00446   inline void handle_fpu_trap();
00447   
00448   static inline void set_int3_handler(int (*handler)(Trap_state *ts));
00449   
00458   virtual void ipc_receiver_ready();
00459   
00467   Ipc_err ipc_continue(Ipc_err ipc_code);
00468   
00469   inline Thread * next_present() const;
00470   
00471   static inline int log_page_fault();
00472   
00476   inline void sys_ipc_log();
00477   
00480   inline void sys_ipc_trace();
00481   
00484   inline void sys_fpage_unmap_log();
00485   
00486   // Note that we don't want to check for Thread_invalid since we don't want
00487   // to raise page faults from inside the kernel debugger
00488   inline int is_mapped();
00489   
00490   // check if thread is valid (i.e. valid address, thread mapped)
00491   int is_valid();
00492   
00493   void print_snd_partner(int task_format=0);
00494   
00495   // Be robust if partner is invalid
00496   void print_partner(int task_format=0);
00497   
00498   // Be robust if this object is invalid
00499   void print_uid(int task_format=0);
00500   
00501   void print_state_long(unsigned cut_on_len = 0);
00502   
00503   static inline Task_num get_task(Global_id id);
00504 
00505 protected:  
00523   inline void reset_ipc_window();
00524   
00535   inline void setup_ipc_window(unsigned win, Address address);
00536   
00540   inline bool in_present_list();
00541   
00546   inline void present_enqueue(Thread *sibling);
00547   
00551   inline void present_dequeue();
00552 
00553 private:  
00554   static inline void user_invoke_generic();
00555   
00562   bool kill();
00563   
00564   inline void copy_utcb_to(Thread*);
00565   
00566   inline void enqueue_thread0_other_task();
00567   
00568   inline int is_privileged_for_debug(Trap_state * /*ts*/);
00569   
00570   static void print_page_fault_error(Mword e);
00571   
00572   inline bool get_ioport(Address /*eip*/, Trap_state * /*ts*/,
00573   unsigned * /*port*/, unsigned * /*size*/);
00574   
00575   inline void enqueue_thread_other_task();
00576   
00577   inline void setup_lipc_utcb();
00578   
00579   inline void setup_exception_ipc();
00580   
00583   inline void setup_utcb_kernel_addr();
00584   
00588   inline void arch_init();
00589   
00596   static int handle_int3(Trap_state *ts);
00597   
00600   static int call_nested_trap_handler(Trap_state *ts);
00601   
00602   inline int check_trap13_kernel(Trap_state *ts, bool from_user);
00603   
00604   inline void check_f00f_bug(Trap_state *ts);
00605   
00606   inline bool handle_io_page_fault(Trap_state *ts, Address eip, bool from_user);
00607   
00608   inline bool handle_sysenter_trap(Trap_state *ts, Address eip, bool from_user);
00609   
00610   inline bool trap_is_privileged(Trap_state *);
00611   
00612   inline void do_wrmsr_in_kernel(Trap_state *ts);
00613   
00614   inline void do_rdmsr_in_kernel(Trap_state *ts);
00615   
00616   inline int handle_not_nested_trap(Trap_state *ts);
00617   
00618   inline bool handle_lldt(Trap_state *);
00619   
00620   /*
00621    * Return current timesharing parameters
00622    */
00623   inline void get_timesharing_param(L4_sched_param *param, L4_uid *preempter, L4_uid *ipc_partner);
00624   
00625   /*
00626    * Set scheduling parameters for timeslice with id 'id'
00627    */
00628   inline Mword set_schedule_param(L4_sched_param param, unsigned short const id);
00629   
00630   /*
00631    * Set preempter unless "invalid"
00632    */
00633   inline void set_preempter(L4_uid const preempter);
00634   
00635   /*
00636    * Add a realtime timeslice at the end of the list
00637    */
00638   inline Mword set_realtime_param(L4_sched_param param);
00639   
00640   /*
00641    * Remove all realtime timeslices
00642    */
00643   inline Mword remove_realtime_param();
00644   
00645   Mword begin_periodic(Unsigned64 clock, Mword const type);
00646   
00647   Mword end_periodic();
00648   
00649   inline void reset_nest(L4_uid id);
00650   
00651   inline void inc_nest(L4_uid id);
00652   
00653   inline void update_nest(L4_uid id);
00654   
00655   inline int handle_inter_task_ex_regs(Sys_ex_regs_frame *, L4_uid *, Thread **, Task **, Thread **);
00656   
00660   static inline Unsigned64 round_quantum(Unsigned64 quantum);
00661   
00668   inline Unsigned64 snd_timeout(L4_timeout t, Sys_ipc_frame const * regs);
00669   
00676   inline Unsigned64 rcv_timeout(L4_timeout t, Sys_ipc_frame const *regs);
00677   
00686   Ipc_err do_send(Thread *partner, L4_timeout t, Sys_ipc_frame *regs);
00687   
00695   inline void prepare_receive(Sender *partner, Sys_ipc_frame *regs);
00696   
00707   Ipc_err do_receive(Sender *sender, L4_timeout t, Sys_ipc_frame *regs);
00708   
00715   Ipc_err handle_page_fault_pager(Address pfa, Mword error_code);
00716   
00722   inline void unlock_receiver(Receiver *receiver, const Sys_ipc_frame*);
00723   
00732   Ipc_err handle_ipc_page_fault(Address pfa, Mword error_code);
00733   
00745   Ipc_err ipc_snd_fpage(Thread* receiver, L4_fpage from_fpage, L4_fpage to_fpage, Address offset, bool grant, bool finish, bool dont_switch);
00746   
00747   inline void wake_receiver(Thread *receiver);
00748   
00754   Ipc_err ipc_finish(Thread *receiver, Ipc_err new_state, bool dont_switch);
00755   
00765   Ipc_err ipc_pagein_request(Receiver* receiver, Address address, Mword error_code);
00766   
00767   inline bool invalid_ipc_buffer(void const *a);
00768   
00787   Ipc_err do_send_long(Thread *partner, Sys_ipc_frame *i_regs);
00788   
00791   void page_fault_log(Address pfa, unsigned error_code, unsigned eip);
00792 };
00793 
00794 // ----------------------------------------------------------------------------
00795 
00796 /*
00797  * Fiasco Thread Code
00798  * Shared between UX and native IA32.
00799  */
00800 
00801 
00802 extern void (*syscall_table[])();
00803 
00807 class Pf_msg_utcb_saver
00808 {
00809 public:
00810   Pf_msg_utcb_saver (const Utcb *u);
00811   void restore (Utcb *u);
00812 };
00813 
00817 inline Thread* current_thread();
00818 
00828 extern "C" FIASCO_FASTCALL int thread_page_fault(Address pfa, Mword error_code, Address ip, Mword flags, Return_frame *regs);
00829 
00837 extern "C" FIASCO_FASTCALL int thread_handle_trap(Trap_state *ts);
00838 
00839 // We are entering with disabled interrupts!
00840 extern "C" FIASCO_FASTCALL void thread_timer_interrupt(Address ip);
00841 
00844 extern "C" void thread_timer_interrupt_stop(void);
00845 
00850 extern "C" void thread_timer_interrupt_slow(void);
00851 
00855 // The "FPU not available" trap entry point
00856 extern "C" void thread_handle_fputrap();
00857 
00858 extern "C" void sys_thread_switch_wrapper();
00859 
00860 // these wrappers must come last in the source so that the real sys-call 
00861 // implementations can be inlined by g++
00862 extern "C" void sys_fpage_unmap_wrapper();
00863 
00864 extern "C" void sys_id_nearest_wrapper();
00865 
00866 extern "C" void sys_thread_ex_regs_wrapper();
00867 
00868 extern "C" void sys_task_new_wrapper();
00869 
00870 extern "C" void sys_thread_schedule_wrapper();
00871 
00872 extern "C" void sys_ipc_wrapper();
00873 
00874 extern "C" void ipc_short_cut_wrapper();
00875 
00876 extern "C" void sys_ipc_log_wrapper(void);
00877 
00878 extern "C" void sys_ipc_trace_wrapper(void);
00879 
00880 extern "C" void sys_fpage_unmap_log_wrapper(void);
00881 
00882 //
00883 // IMPLEMENTATION includes follow (for use by inline functions)
00884 //
00885 
00886 #include "fpu_alloc.h"
00887 #include "timeout.h"
00888 
00889 #include "fpu_state.h"
00890 #include "gdt.h"
00891 #include "entry_frame.h"
00892 #include "task.h"
00893 
00894 #include "mod32.h"
00895 #include "map_util.h"
00896 #include "space.h"
00897 #include "lock_guard.h"
00898 #include "kdb_ke.h"
00899 #include "logdefs.h"
00900 #include "regdefs.h"
00901 #include "std_macros.h"
00902 #include "vmem_alloc.h"
00903 #include "warn.h"
00904 #include "jdb_trace.h"
00905 #include "cpu_lock.h"
00906 #include "config.h"
00907 #include "kmem.h"
00908 #include "mem_layout.h"
00909 
00910 #include <cstdio>
00911 #include "processor.h"
00912 #include "trap_state.h"
00913 
00914 //
00915 // IMPLEMENTATION of inline functions (and needed classes)
00916 //
00917 
00918 
00919 
00927 inline void *
00928 Thread::operator new(size_t, L4_uid id)
00929 {
00930   // Allocate TCB in TCB space.  Actually, do not allocate anything,
00931   // just return the address.  Allocation happens on the fly in
00932   // Thread::handle_page_fault().
00933   return static_cast <void*>(lookup (id));
00934 }
00935 
00936 
00941 inline void
00942 Thread::operator delete(void *)
00943 {
00944   // XXX should check if all thread blocks on a given page are free
00945   // and deallocate (or mark free) the page if so.  this should be
00946   // easy to detect since normally all threads of a given task are
00947   // destroyed at once at task deletion time
00948 }
00949 
00950 
00956 inline Thread*
00957 Thread::lookup(Context* c)
00958 {
00959   return reinterpret_cast<Thread*>(c);
00960 }
00961 
00962 
00963 //
00964 // state requests/manipulation
00965 //
00966 
00971 inline Task *
00972 Thread::task() const
00973 {
00974   return _task;
00975 }
00976 
00977 
00984 inline Thread_lock *
00985 Thread::thread_lock()
00986 {
00987   return &_thread_lock;
00988 }
00989 
00990 
00991 
00992 inline Preemption *
00993 Thread::preemption()
00994 {
00995   return &_preemption;
00996 }
00997 
00998 
00999 
01000 inline void
01001 Thread::handle_timer_interrupt()
01002 {
01003   // XXX: This assumes periodic timers (i.e. bogus in one-shot mode)
01004   if (!Config::fine_grained_cputime)
01005     consume_time (Config::scheduler_granularity);
01006 
01007   // Check if we need to reschedule due to timeouts or wakeups
01008   if (Timeout::do_timeouts() && !Context::schedule_in_progress())
01009     {
01010       schedule();
01011       assert (timeslice_timeout->is_set());     // Coma check
01012     }
01013 }
01014 
01015 
01023 inline Thread *
01024 Thread::lookup(Global_id id)
01025 {
01026   return reinterpret_cast <Thread *>
01027     (id.is_invalid() || id.gthread() >= Config::max_threads() ?
01028      0 : Mem_layout::Tcbs + (id.gthread() * Config::thread_block_size));
01029 }
01030 
01031 
01036 inline Thread *
01037 Thread::lookup_first_thread(unsigned space)
01038 { return lookup (Global_id (space, 0)); }
01039 
01040 
01045 inline Space_index
01046 Thread::space_index() const
01047 { return Space_index (id().task()); }
01048 
01049 
01054 inline Space_index
01055 Thread::chief_index() const
01056 { return Space_index (id().chief()); }
01057 
01058 
01064 inline unsigned
01065 Thread::nest() const
01066 {
01067   return id().nest();
01068 }
01069 
01070 
01071 
01072 inline bool
01073 Thread::has_privileged_iopl()
01074 {
01075   return false;
01076 }
01077 
01078 
01079 
01080 inline void
01081 Thread::unset_utcb_ptr() {}
01082 
01083 
01084 
// Reflect a CPU exception to a user-level handler by building an
// exception frame on the user stack and redirecting the trap frame's
// ip/sp to the handler.  Returns false if the thread is not ready or
// is being cancelled, true once the frame has been written.
01095 inline bool
01096 Thread::raise_exception(Trap_state *ts, Address handler)
01097 {
      // Capture faulting ip and user stack pointer before we rewrite them.
01098   Mword ip = ts->ip(), *sp = (Mword *) ts->sp();
01099   bool error_code = false, fault_addr = false;
01100 
01101   {
      // Serialize against other manipulations of this TCB.
01102     Lock_guard <Thread_lock> guard (thread_lock());
01103 
01104     if (!(state() & Thread_ready) || (state() & Thread_cancel))
01105       return false;
01106 
      // Classify the trap: which vectors carry an error code, and page
      // faults (0xe) additionally carry the fault address (CR2).
01107     switch (ts->trapno)
01108       {
01109         case 0xe:
01110           fault_addr = true;
          // FALLTHROUGH: page faults also push an error code.
01111         case 0x8:
01112         case 0xa:
01113         case 0xb:
01114         case 0xc:
01115         case 0xd:
01116         case 0x11:
01117           error_code = true;
01118       }
01119 
      // Reserve room for eflags/cs/ip (+ error code).  NOTE(review):
      // for page faults CR2 is written at sp-5 below, one word past the
      // space reserved here -- presumably intentional (scratch word
      // below the new sp); confirm against the user-level handler ABI.
01120     ts->esp -= sizeof (Mword) * (error_code ? 4 : 3);
01121     ts->eip  = handler;
01122   }
01123 
      // Write the frame into user memory, hardware-exception layout:
      // flags, cs, ip from top down.
01124   space()->poke_user (sp - 1, (Mword)ts->flags());
01125   space()->poke_user (sp - 2, (Mword)exception_cs());
01126   space()->poke_user (sp - 3, (Mword)ip);
01127 
01128   if (error_code)
01129     space()->poke_user (sp - 4, (Mword)ts->err);
01130   if (fault_addr)
01131     space()->poke_user (sp - 5, (Mword)ts->cr2);
01132 
01133   /* reset single trap flag to mirror cpu correctly */
01134   if (ts->trapno == 1)
01135     ts->eflags &= ~EFLAGS_TF;
01136 
01137   return true;
01138 }
01139 
01140 
01141 
01142 inline int
01143 Thread::snd_exception(Trap_state *) const
01144 {
01145   return 0;
01146 }
01147 
01148 
01149 /*
01150  * Handle FPU trap for this context. Assumes disabled interrupts
01151  */
01152 
01153 inline void
01154 Thread::handle_fpu_trap()
01155 {
01156   // If we own the FPU, we should never be getting an "FPU unavailable" trap
01157   assert (Fpu::owner() != this);
01158 
01159   // Enable the FPU before accessing it, otherwise recursive trap
01160   Fpu::enable();
01161 
01162   // Save the FPU state of the previous FPU owner (lazy) if applicable
01163   if (Fpu::owner()) {
01164     Fpu::owner()->state_change_dirty (~Thread_fpu_owner, 0);
01165     Fpu::save_state (Fpu::owner()->fpu_state());
01166   }
01167 
01168   // Allocate FPU state slab if we didn't already have one
01169   if (!fpu_state()->state_buffer())
01170     Fpu_alloc::alloc_state (fpu_state());
01171 
01172   // Become FPU owner and restore own FPU state
01173   state_change_dirty (~0U, Thread_fpu_owner);
01174   Fpu::restore_state (fpu_state());
01175 
01176   Fpu::set_owner (this);
01177 }
01178 
01179 
01180 
01181 inline void
01182 Thread::set_int3_handler(int (*handler)(Trap_state *ts))
01183 {
01184   int3_handler = handler;
01185 }
01186 
01187 
01188 
01189 inline Thread *
01190 Thread::next_present() const
01191 {
01192   return present_next;
01193 }
01194 
01195 
01196 
01197 inline int
01198 Thread::log_page_fault()
01199 {
01200   return Jdb_pf_trace::log();
01201 }
01202 
01203 // Note that we don't want to check for Thread_invalid since we don't want
01204 // to raise page faults from inside the kernel debugger
01205 
01206 inline int
01207 Thread::is_mapped()
01208 {
01209   return Kmem::virt_to_phys((void*)this) != (Address)-1;
01210 }
01211 
01212 
01213 
01214 inline Task_num
01215 Thread::get_task(Global_id id)
01216 { return id.task(); }
01217 
01218 
01237 inline void             
01238 Thread::reset_ipc_window()
01239 {                
01240   _vm_window1 = _vm_window0 = (Address) -1;
01241 }
01242 
01243 
01255 inline void             
01256 Thread::setup_ipc_window(unsigned win, Address address)
01257 {                
01258   if (win == 0)
01259     {
01260       // Useless, because _vm_window0 contains the always the message buffer,
01261       // which we setup for every long ipc only once.
01262       // And because every thread switch flushes always the IPC window.
01263       // we need to update _vm_window0 every time.
01264       // This can be enabled, if we get an ipc window owner per space, 
01265       // like the fpu.
01266       // if (EXPECT_TRUE (_vm_window0 == address))
01267       //   return;
01268       _vm_window0 = address;
01269     }
01270   else
01271     {
01272       // Only makes sense if some strings are in the same 4mb region.
01273       if (EXPECT_FALSE (_vm_window1 == address))
01274         return;
01275       _vm_window1 = address;
01276     }
01277 
01278   // Pull in the mappings for the entire 8 MB window, by copying 2 PDE slots,
01279   // thereby replacing the old ones, based on the optimistic assumption that
01280   // the receiver's mappings are already set up appropriately. Note that this
01281   // does not prevent a pagefault on either of these mappings later on, e.g.
01282   // if the receiver's mapping is r/o here and needs to be r/w for Long-IPC.
01283   // Careful: for SMAS current_space() != space()
01284   current_space()->remote_update (Kmem::ipc_window (win),
01285                                   receiver()->space(), address, 2);
01286 }
01287 
01288 
01293 inline bool 
01294 Thread::in_present_list()
01295 {
01296   return present_next;
01297 }
01298 
01299 
01305 inline void
01306 Thread::present_enqueue(Thread *sibling)
01307 {
01308   Lock_guard<Cpu_lock> guard (&cpu_lock);
01309 
01310   if (! in_present_list())
01311     {
01312       present_next = sibling->present_next;
01313       present_prev = sibling;
01314       sibling->present_next = this;
01315       present_next->present_prev = this;
01316     }
01317 }
01318 
01319 
01324 inline void
01325 Thread::present_dequeue()
01326 {
01327   Lock_guard<Cpu_lock> guard (&cpu_lock);
01328 
01329   if (in_present_list())
01330     {
01331       present_prev->present_next = present_next;
01332       present_next->present_prev = present_prev;
01333       present_next /* = present_prev */ = 0;
01334     }
01335 }
01336 
01337 
01338 
01339 inline void
01340 Thread::copy_utcb_to(Thread*)
01341 {}
01342 
01343 
01350 inline void
01351 Thread::unlock_receiver(Receiver *receiver, const Sys_ipc_frame*)
01352 {
01353   receiver->ipc_unlock();
01354 }
01355 
01356 
01357 
01358 inline void
01359 Thread::wake_receiver(Thread *receiver)
01360 {
01361   // If neither IPC partner is delayed, just update the receiver's state
01362   if (EXPECT_TRUE (!((state() | receiver->state()) & Thread_delayed_ipc)))
01363     {
01364       receiver->state_change (~Thread_ipc_mask, Thread_ready);
01365       return;
01366     }
01367 
01368   // Critical section if either IPC partner is delayed until its next period
01369   Lock_guard <Cpu_lock> guard (&cpu_lock);
01370 
01371   // Sender has no receive phase and deadline timeout already hit
01372   if ( (state() & (Thread_receiving |
01373                    Thread_delayed_deadline | Thread_delayed_ipc)) ==
01374       Thread_delayed_ipc)
01375     {
01376       state_change_dirty (~Thread_delayed_ipc, 0);
01377       switch_sched (sched_context()->next());
01378       _deadline_timeout.set (Timer::system_clock() + period());
01379     }
01380 
01381   // Receiver's deadline timeout already hit
01382   if ( (receiver->state() & (Thread_delayed_deadline |
01383                              Thread_delayed_ipc) ==   
01384                              Thread_delayed_ipc))      
01385     {
01386       receiver->state_change_dirty (~Thread_delayed_ipc, 0);   
01387       receiver->switch_sched (receiver->sched_context()->next());
01388       receiver->_deadline_timeout.set (Timer::system_clock() +
01389                                        receiver->period());
01390     }
01391 
01392   receiver->state_change_dirty (~(Thread_ipc_mask|Thread_delayed_ipc),
01393                                 Thread_ready);
01394 }
01395 
01396 
01397 
01398 inline Thread *
01399 Thread::lookup(L4_uid id, Space*)
01400 { return lookup ((Global_id) id); }
01401 
01402 
01403 
01404 inline void
01405 Thread::kill_all()
01406 {
01407   kill_task (Space_index (Config::boot_id.task()));
01408 }
01409 
01410 
01411 inline Task_num Thread::d_taskno() { return space_index(); }
01412 
01413 inline LThread_num Thread::d_threadno() { return id().lthread(); }
01414 
01415 
01416 
01417 inline bool Thread::handle_smas_page_fault( Address, Mword, Ipc_err & )
01418 { return false; }
01419 
01420 
01421 
01422 inline void Thread::kill_small_space(void)
01423 {}
01424 
01425 
01426 
01427 inline Mword Thread::small_space( void )
01428 { return 0; }
01429 
01430 
01431 
01432 inline void Thread::set_small_space( Mword /*nr*/)
01433 {}
01434 
01435 
01436 
01437 inline void
01438 Thread::rcv_startup_msg()
01439 {}
01440 
01441 
01442 
// Probe whether this TCB's memory is mapped, without taking a normal
// page fault: returns nonzero when mapped, 0 when the touch faulted.
01443 inline Mword
01444 Thread::is_tcb_mapped() const
01445 {
01446   // Touch the state to page in the TCB. If we get a pagefault here,
01447   // the handler doesn't handle it but returns immediately after
01448   // setting eax to 0xffffffff
      // The and with -1 is a read-modify-write no-op used purely as the
      // probing access; NOTE(review): the setnc result encoding relies on
      // the fault path's eax = 0xffffffff contract above -- confirm
      // against pagein_tcb_request() before touching this sequence.
01449   Mword pagefault_if_0;
01450   asm volatile ("xorl %%eax,%%eax               \n\t"
01451                 "andl $0xffffffff, %%ss:(%%ecx) \n\t"
01452                 "setnc %%al                     \n\t"
01453                 : "=a" (pagefault_if_0) : "c" (&_state));
01454   return pagefault_if_0;
01455 }
01456 
01457 
01458 // 
01459 // Public services
01460 // 
01461 
01462 inline bool
01463 Thread::handle_sigma0_page_fault(Address pfa)
01464 {
01465   size_t size;
01466 
01467   // Check if mapping a superpage doesn't exceed the size of physical memory
01468   if (Cpu::have_superpages() &&
01469       (pfa & Config::SUPERPAGE_MASK) + Config::SUPERPAGE_SIZE <
01470        Kip::k()->main_memory_high())
01471     {
01472       pfa &= Config::SUPERPAGE_MASK;
01473       size = Config::SUPERPAGE_SIZE;
01474     }
01475   else
01476     {
01477       pfa &= Config::PAGE_MASK;
01478       size = Config::PAGE_SIZE;
01479     }
01480 
01481   return space()->v_insert (pfa, pfa, size,
01482                             Space::Page_writable |
01483                             Space::Page_user_accessible)
01484                          != Space::Insert_err_nomem;
01485 }
01486 
01487 
01488 
01489 inline Mword
01490 Thread::update_ipc_window(Address pfa, Address remote_pfa, Mword error_code)
01491 {
01492   Space *remote = receiver()->space();
01493 
01494   // If the remote address space has a mapping for the page fault address and
01495   // it is writable or we didn't want to write to it, then we can simply copy
01496   // the mapping into our address space via space()->remote_update().
01497   // Otherwise return 0 to trigger a pagein_request upstream.
01498 
01499   if (EXPECT_TRUE (remote->mapped (remote_pfa, (error_code & PF_ERR_WRITE))))
01500     {
01501       //careful: for SMAS current_space() != space()
01502       current_space()->remote_update (pfa, remote, remote_pfa, 1);
01503 
01504       // It's OK if the PF occurs again: This can happen if we're
01505       // preempted after the call to remote_update() above.  (This code
01506       // corresponds to code in Thread::handle_page_fault() that
01507       // checks for double page faults.)
01508 
01509       if (Config::monitor_page_faults)
01510         _last_pf_address = (Address) -1;
01511 
01512       return 1;
01513     }
01514 
01515   return 0;
01516 }
01517 
01518 
01519 
01520 inline Mword const
01521 Thread::exception_cs()
01522 {
01523   return Gdt::gdt_code_user | Gdt::Selector_user;
01524 }
01525 
01526 
01527 
01528 inline bool
01529 Thread::handle_smas_gp_fault()
01530 {
01531   return false;
01532 }
01533 
01534 
01537 inline void
01538 Thread::sys_fpage_unmap()
01539 {
01540   Sys_unmap_frame *regs = sys_frame_cast<Sys_unmap_frame>(this->regs());
01541 
01542   fpage_unmap (space(), regs->fpage(), regs->self_unmap(), regs->downgrade());
01543 }
01544 
01545 
01546 
01547 inline Mword
01548 Thread::ipc_send_regs(Thread* receiver, Sys_ipc_frame const *sender_regs)
01549 {
01550   Mword ret;
01551 
01552   ret = receiver->ipc_try_lock (nonull_static_cast<Sender*>(current_thread()));
01553 
01554   if (EXPECT_FALSE (ret))
01555     {
01556       if ((Smword) ret < 0)
01557         return 0x80000000;      // transient error -- partner not ready
01558 
01559       return ret;
01560     }
01561 
01562   if (EXPECT_FALSE (state() & Thread_cancel))
01563     {
01564       receiver->ipc_unlock();
01565       return Ipc_err::Secanceled;
01566     }
01567 
01568   receiver->ipc_init(nonull_static_cast<Sender*>(current_thread()));
01569 
01570   if (!Config::deceit_bit_disables_switch && sender_regs->snd_desc().deceite())
01571     panic ("deceiving ipc");    // XXX unimplemented
01572 
01573   Sys_ipc_frame* dst_regs = receiver->rcv_regs();
01574   const L4_msgdope ret_dope(Sys_ipc_frame::num_reg_words(), 0);
01575 
01576   ret = 0;                              // status code: IPC successful
01577   dst_regs->msg_dope (ret_dope);        // status code: rcv'd 2 dw
01578 
01579   // dequeue from sender queue if enqueued
01580   sender_dequeue (receiver->sender_list());
01581 
01582   // Reset sender's timeout, if any.  Once we're here, we don't want
01583   // the timeout to reset the sender's ipc_in_progress state bit
01584   // (which it still needs for a subsequent receive operation).
01585   Timeout *t = _timeout;
01586   if (EXPECT_FALSE (t != 0))
01587     {
01588       t->reset();
01589       if (t->has_hit())         // too late?
01590         {
01591           // Fix: re-set the Thread_ipc_in_progress flag.  The
01592           // following algorithm makes sure we only set this flag if
01593           // Thread_cancel has not been set in-between.
01594           state_add (Thread_ipc_in_progress);
01595           if (state() & Thread_cancel)
01596             state_del (Thread_ipc_in_progress);
01597         }
01598     }
01599 
01600   // copy message register contents
01601   sender_regs->copy_msg (dst_regs);
01602 
01603   copy_utcb_to(receiver);
01604 
01605   // copy sender ID
01606   dst_regs->rcv_src (id());
01607 
01608   // disallow lipc of the receiver
01609   receiver->deny_lipc();
01610 
01611   // is this a fast (registers-only) receive operation?
01612   // the following operations can be short-cut here:
01613   // - short message sender to short message receiver
01614   // - (short) register message sender to long message receiver
01615   // the following operations can not be short-cut as we're not allowed
01616   // to touch user memory:
01617   // - long message sender to long message receiver (obviously)
01618   // - long message sender to short message receiver
01619   //   (the long message might actually fit into a short message)
01620   // - short-flexpage message sender to long message receiver
01621   //   (because the rcvr's msg buffer may contain a flexpage option)
01622 
01623   // There's a possible optimization for
01624   // short-flexpage-to-long-message-buffer transfers; see the top of
01625   // this file.
01626 
01627   if (EXPECT_TRUE (!sender_regs->snd_desc().is_long_ipc())
01628         // sender wants short register IPC
01629       || EXPECT_TRUE (sender_regs->snd_desc().msg() == 0
01630                        // sender wants short IPC
01631                        && (dst_regs->rcv_desc().is_register_ipc()
01632                             // receiver wants register IPC
01633                            || dst_regs->rcv_desc().rmap()
01634                                // receiver wants short flexpage
01635                          )
01636         ))
01637     {
01638       // short IPC!
01639 
01640       if (sender_regs->snd_desc().map()) // send short flexpage?
01641         {
01642           if (EXPECT_FALSE (! dst_regs->rcv_desc().rmap()) )
01643               // rcvr not expecting an fpage?
01644             {
01645               dst_regs->msg_dope_set_error(Ipc_err::Remsgcut);
01646               ret = Ipc_err::Semsgcut;
01647             }
01648           else
01649             {
01650               dst_regs->msg_dope_combine(
01651                 fpage_map(space(),
01652                           L4_fpage(sender_regs->msg_word(1)),
01653                           receiver->space(), dst_regs->rcv_desc().fpage(),
01654                           sender_regs->msg_word(0) & Config::PAGE_MASK,
01655                           L4_fpage(sender_regs->msg_word(1)).grant()));
01656 
01657               if (dst_regs->msg_dope().rcv_map_failed())
01658                 ret = Ipc_err::Semapfailed;
01659             }
01660         }
01661 
01662       /* set the source local-id */
01663       set_source_local_id(receiver, dst_regs);
01664 
01665       // (If we don't send a flexpage in our message, that's OK even
01666       // if the receiver expected one.  The receiver can tell from the
01667       // status code that he didn't receive one.)
01668 
01669       // IPC done -- reset states
01670 
01671       // After this point we are able to receive!
01672       // Make sure that we are not if there was a send error.
01673       state_del ((ret != 0 ? Thread_ipc_in_progress : 0) |
01674                   Thread_polling | Thread_send_in_progress);
01675 
01676       wake_receiver (receiver);
01677 
01678       if (!Config::deceit_bit_disables_switch ||
01679           !sender_regs->snd_desc().deceite())
01680           receiver->thread_lock()->set_switch_hint(SWITCH_ACTIVATE_LOCKEE);
01681     }
01682   else
01683     {
01684       // prepare long IPC
01685 
01686       // XXX check for cancel -- sender?  receiver?
01687 
01688       _target_desc = dst_regs->rcv_desc(); //ebp & ~1;
01689 
01690       // If the receive timeout has hit, ignore it.  We do this by
01691       // overwriting the Thread_ipc_in_process flag which the timeout
01692       // may have deleted -- but only if the Thread_cancel flag has
01693       // not been set in-between.
01694 
01695       receiver->reset_timeout();
01696 
01697       // set up page-fault timeouts
01698       L4_timeout t = sender_regs->timeout();
01699       if (t.snd_pfault() == 15) // send pfault timeout == 0 ms?
01700         receiver->_pf_timeout = L4_timeout(0,1,0,1,0,0);
01701       else
01702         {
01703           receiver->_pf_timeout = L4_timeout(1, t.snd_pfault(),
01704                                              1, t.snd_pfault(), 0, 0);
01705           // XXX should normalize timeout spec, but do_send/do_receive
01706           // can cope with non-normalized numbers.
01707         }
01708 
01709       t = dst_regs->timeout();
01710       if (t.rcv_pfault() == 15) // rcv pfault timeout == 0 ms?
01711         _pf_timeout = L4_timeout(0,1,0,1,0,0);
01712       else
01713         {
01714           _pf_timeout = L4_timeout(1, t.rcv_pfault(), 1, t.rcv_pfault(), 0, 0);
01715           // XXX should normalize timeout spec, but do_send/do_receive
01716           // can cope with non-normalized numbers.
01717         }
01718 
01719       // set up return code in case we're aborted
01720       dst_regs->msg_dope_set_error(Ipc_err::Reaborted);
01721 
01722       // switch receiver's state, and put it to sleep.
01723       // overwrite ipc_in_progress flag a timeout may have deleted --
01724       // see above
01725       receiver->state_change(~(Thread_receiving | Thread_busy | Thread_ready),
01726                              Thread_rcvlong_in_progress
01727                              | Thread_ipc_in_progress);
01728       if (receiver->state() & Thread_cancel)
01729           receiver->state_change(~Thread_ipc_in_progress, Thread_ready);
01730 
01731       // in the sender, send_in_progress must remain set
01732       state_del(Thread_polling);
01733     }
01734 
01735   unlock_receiver(receiver, sender_regs);
01736   return ret;
01737 }
01738 
01739 
01740 
01741 inline void
01742 Thread::page_fault_msg(Sys_ipc_frame &r, Address fault_address,
01743                         Mword fault_eip, Mword error_code)
01744 {
01745   r.set_msg_word (0, PF::addr_to_msgword0 (fault_address, error_code));
01746   r.set_msg_word (1, PF::pc_to_msgword1 (fault_eip, error_code));
01747   r.set_msg_word (2, 0); // nop in V2
01748   r.snd_desc (0);       // short msg
01749   r.rcv_desc(L4_rcv_desc::short_fpage(L4_fpage(0,0,L4_fpage::Whole_space,0)));
01750 };
01751 
01752 
01753 
01754 inline void
01755 Thread::commit_ipc_success(Sys_ipc_frame *regs, Ipc_err err)
01756 {
01757   regs->msg_dope (regs->msg_dope().raw_dope() | err.raw());
01758 }
01759 
01760 
01761 
/** Commit a failed IPC operation to the caller's IPC frame.
    Clears any pending delayed-preemption state bits, resets the
    frame's message dope to zero, and then merges in the error bits.
    @param regs IPC frame to receive the failure result.
    @param err  error code describing the failure.
 */
inline void
Thread::commit_ipc_failure(Sys_ipc_frame *regs, Ipc_err err)
{
  // A failed IPC also cancels the delayed-deadline/delayed-IPC states.
  state_del (Thread_delayed_deadline | Thread_delayed_ipc);
  regs->msg_dope (0);   // wipe the dope first, then OR in the error bits
  commit_ipc_success (regs, err);
}
01769 
01770 
01771 
01772 inline Ipc_err
01773 Thread::get_ipc_err(Sys_ipc_frame *regs)
01774 {
01775   return Ipc_err (regs->msg_dope().raw());
01776 }
01777 
/** Store the sender's local ID in the receiver's IPC frame.
    This default implementation is a deliberate no-op: nothing is
    written to the frame.  NOTE(review): presumably a LIPC-enabled
    configuration supplies a non-empty variant — confirm against the
    LIPC build of this function.
 */
inline void
Thread::set_source_local_id(Thread *, Sys_ipc_frame *)
{}
01784 
01785 
01786 
01787 inline Address
01788 Thread::remote_fault_addr(Address pfa)
01789 {
01790   return pfa < Kmem::ipc_window(1) ? pfa - Kmem::ipc_window(0) + _vm_window0
01791                                    : pfa - Kmem::ipc_window(1) + _vm_window1;
01792 }
01793 
01794 
01795 
/** Central page-fault dispatcher.
    Classifies the fault by the faulting address region and delegates:
    - user memory: forward to the pager via IPC (sigma0 faults are
      satisfied directly from kernel memory);
    - small-address-space region: SMAS handler;
    - IPC window: long-IPC page-in handler;
    - kernel region already mapped in the master page directory:
      synchronize this space's page directory from it;
    - kernel TCB area: demand-allocate (or re-allocate) a kernel page.
    On a handler error the function falls through to the `error` label
    and, if a recovery jump buffer is installed, longjmps to it.
    @param pfa        faulting virtual address.
    @param error_code architecture-specific page-fault error code.
    @param pc         faulting instruction pointer (used for logging).
    @return true if the fault was resolved; false if the caller must
            treat it as an unhandled fault (e.g. raise a trap).
 */
inline bool Thread::handle_page_fault(Address pfa, Mword error_code, Mword pc)
{
  // Optional diagnostics for faults raised from kernel mode.
  if (Config::Log_kernel_page_faults && !PF::is_usermode_error(error_code))
    {
      printf("*KP[%lx,", pfa);
      print_page_fault_error(error_code);
      printf(",%lx]\n", pc);
    }
#if 0
  printf("Translation error ? %x\n"
         "  current space has mapping : %08x\n"
         "  Kernel space has mapping  : %08x\n",
         PF::is_translation_error(error_code),
         current_space()->lookup((void*)pfa),
         Space::kernel_space()->lookup((void*)pfa));
#endif

  if (Config::monitor_page_faults)
    {
      // The same fault occurring twice in a row indicates the handler
      // did not make progress -- drop into the kernel debugger.
      if (_last_pf_address == pfa && _last_pf_error_code == error_code)
        {
          if (!log_page_fault())
            printf("*P[%lx,%lx,%lx]", pfa, error_code & 0xffff, pc);
          putchar('\n');

          kdb_ke("PF happened twice");
        }

      _last_pf_address = pfa;
      _last_pf_error_code = error_code;

      // (See also corresponding code in Thread::handle_ipc_page_fault()
      //                          and in Thread::handle_slow_trap.)
    }

  CNT_PAGE_FAULT;

  // TODO: put this into a debug_page_fault_handler
  if (EXPECT_FALSE (log_page_fault()))
    page_fault_log (pfa, error_code, pc);

  Ipc_err ipc_code(0);

  // Check for page fault in user memory area
  if (EXPECT_TRUE (!Kmem::is_kmem_page_fault(pfa, error_code)))
    {
      // Make sure that we do not handle page faults that do not
      // belong to this thread.
      assert (space() == current_space());

      if (EXPECT_FALSE (space()->is_sigma0()))
        {
          // special case: sigma0 can map in anything from the kernel
          if(handle_sigma0_page_fault(pfa))
            return true;

          ipc_code.error (Ipc_err::Remapfailed);
          goto error;
        }

      // user mode page fault -- send pager request
      if (!(ipc_code = handle_page_fault_pager(pfa, error_code)).has_error())
        return true;

      goto error;
    }

  // Check for page fault in small address space
  else if (EXPECT_FALSE (Kmem::is_smas_page_fault(pfa)))
    {
      if (handle_smas_page_fault (pfa, error_code, ipc_code))
        return true;

      goto error;
    }

  // Check for page fault in kernel memory region caused by user mode
  else if (EXPECT_FALSE(PF::is_usermode_error(error_code)))
    return false;             // disallow access after mem_user_max

  // Check for page fault in IO bit map or in delimiter byte behind IO bitmap
  // assume it is caused by an input/output instruction and fall through to
  // handle_slow_trap
  else if (EXPECT_FALSE(Kmem::is_io_bitmap_page_fault(pfa)))
    return false;

  // We're in kernel code faulting on a kernel memory region

  // Check for page fault in IPC window. Mappings for this should never
  // be present in the global master page dir (case below), because the
  // IPC window mappings are always flushed on context switch.
  else if (EXPECT_TRUE (Kmem::is_ipc_page_fault(pfa, error_code)))
    {
      if (!(ipc_code = handle_ipc_page_fault(pfa, error_code)).has_error())
        return true;

      goto error;
    }

  // A page is not present but a mapping exists in the global page dir.
  // Update our page directory by copying from the master pdir
  // This is the only path that should be executed with interrupts
  // disabled if the page faulter also had interrupts disabled.   
  // thread_page_fault() takes care of that.
  else 
#ifdef CONFIG_ARM
  if (PF::is_translation_error(error_code) &&
      Space::kernel_space()->lookup((void*)pfa) != (Mword) -1)
#else
  if (PF::is_translation_error(error_code) &&
      Kmem::virt_to_phys (reinterpret_cast<void*>(pfa)) != (Mword) -1)
#endif
    {
      current_space()->kmem_update((void*)pfa);
      return true;
    }

  // Check for page fault in kernel's TCB area
  else if (Kmem::is_tcb_page_fault(pfa, error_code))
    {
      if (PF::is_translation_error(error_code))   // page not present
        {
          // Write faults need a real (zero-filled) frame; read faults
          // can share the global zero page (ZERO_MAP).
          if (!PF::is_read_error(error_code))
            Proc::sti();

          if (!Vmem_alloc::page_alloc((void*)(pfa & Config::PAGE_MASK),
                                      PF::is_read_error(error_code) ?
                                      Vmem_alloc::ZERO_MAP:
                                      Vmem_alloc::ZERO_FILL)) 
            panic("can't alloc kernel page");
        }
      else
        { 
          // protection fault
          // this can only be because we have the zero page mapped
          Proc::sti();
          Vmem_alloc::page_free
            (reinterpret_cast<void*> (pfa & Config::PAGE_MASK));
          if (! Vmem_alloc::page_alloc((void*)(pfa & Config::PAGE_MASK),
                                       Vmem_alloc::ZERO_FILL))
            {
              // error could mean: someone else was faster allocating
              // a page there, or we just don't have any pages left; verify
#ifdef CONFIG_ARM
              if (Space::kernel_space()->lookup((void*)pfa) != (Mword) -1)
                panic("can't alloc kernel page");
#else
              if (Kmem::virt_to_phys(reinterpret_cast<void*>(pfa)) 
                  == Mword (-1))
                panic("can't alloc kernel page");
#endif

              // otherwise, there's a page mapped.  continue
            }
        }

      current_space()->kmem_update((void*)pfa);
      return true;
    }

  WARN("No page fault handler for 0x%lx, error 0x%lx, pc "L4_PTR_FMT"",
        pfa, error_code, pc);

  // An error occurred.  Our last chance to recover is an exception
  // handler a kernel function may have set.
 error:

  if (_recover_jmpbuf)
    longjmp (*_recover_jmpbuf, ipc_code.raw());

  return false;
}
01982 
01983 
01984 
/** Save UTCB state that composing a page-fault message would clobber.
    In this configuration nothing in the UTCB needs saving, so the
    constructor is intentionally empty. */
inline Pf_msg_utcb_saver::Pf_msg_utcb_saver(const Utcb *)
{}
01987 
01988 
01989 
/** Restore the UTCB state captured by the constructor.
    Intentionally empty: the matching constructor saves nothing in
    this configuration, so there is nothing to put back. */
inline void
Pf_msg_utcb_saver::restore(Utcb *)
{}
01993 
01994 
/** The thread currently executing on this CPU.
    @return the Thread object obtained by converting the current
            execution context via Thread::lookup(). */
inline Thread*
current_thread()
{
  return Thread::lookup(current());
}
02004 
02005 #endif // thread_h

Generated on Mon Sep 26 14:20:12 2005 for Fiasco by  doxygen 1.4.2