
context.h

00001 // AUTOMATICALLY GENERATED -- DO NOT EDIT!         -*- c++ -*-
00002 
00003 #ifndef context_h
00004 #define context_h
00005 
00006 #include "types.h"
00007 #include "config.h"
00008 #include "fpu_state.h"
00009 #include "sched_context.h"
00010 
00011 //
00012 // INTERFACE definition follows 
00013 //
00014 
00015 
00016 class Entry_frame;
00017 class Space;
00018 class Thread_lock;
00019 
00024 class Context
00025 {
00026   friend class Jdb_thread_list;
00027 
00028 public:
00029 
00033   enum Sched_mode {
00034     Periodic    = 0x1,  
00035     Nonstrict   = 0x2,  
00036   };
00037 
00041   enum Helping_mode {
00042     Helping,
00043     Not_Helping,
00044     Ignore_Helping
00045   };
00046 
00050   static const size_t size = Config::thread_block_size;
00051 
00055   void init_switch_time();
00056 
00061   Cpu_time consumed_time();
00062 
00067   Utcb* utcb() const;
00068 
00069 protected:
00074   void update_consumed_time();
00075 
00076   Mword                 _state;
00077   Mword   *             _kernel_sp;
00078 
00079 private:
00080   friend class Jdb;
00081   friend class Jdb_tcb;
00082 
00084   void switchin_context() asm ("switchin_context_label") FIASCO_FASTCALL;
00085 
00087   void switch_fpu (Context *t);
00088 
00090   void switch_cpu (Context *t);
00091 
00092   Space *               _space;
00093   Context *             _donatee;
00094   Context *             _helper;
00095 
00096   // Lock state
00097   // how many locks does this thread hold on other threads
00098   // incremented in Thread::lock, decremented in Thread::clear
00099   // Thread::kill needs to know
00100   int                   _lock_cnt;
00101   Thread_lock * const   _thread_lock;
00102 
00103   // The scheduling parameters.  We would only need to keep an
00104   // anonymous reference to them as we do not need them ourselves, but
00105   // we aggregate them for performance reasons.
00106   Sched_context         _sched_context;
00107   Sched_context *       _sched;
00108   Unsigned64            _period;
00109   Sched_mode            _mode;
00110   unsigned short        _mcp;
00111 
00112   // Pointer to floating point register state
00113   Fpu_state             _fpu_state;
00114 
00115   // Implementation-specific consumed CPU time (TSC ticks or usecs)
00116   Cpu_time              _consumed_time;
00117 
00118   Context *             _ready_next;
00119   Context *             _ready_prev;
00120 
00121   static bool           _schedule_in_progress;
00122   static Sched_context *_current_sched          asm ("CONTEXT_CURRENT_SCHED");
00123   static Cpu_time       _switch_time            asm ("CONTEXT_SWITCH_TIME");
00124   static unsigned       _prio_highest           asm ("CONTEXT_PRIO_HIGHEST");
00125   static Context *      _prio_next[256]         asm ("CONTEXT_PRIO_NEXT");
00126 private:
00127 
00128 protected:
00129   // make sure that there is no virtual function
00130   void send_activation (unsigned);
00131 
00132 public:  
00141   inline Context(Thread_lock *thread_lock, Space* space, unsigned short prio, unsigned short mcp, Unsigned64 quantum);
00142   
00145   virtual ~Context();
00146   
00151   inline Mword state() const;
00152   
00157   inline Mword exists() const;
00158   
00164   inline void state_add(Mword const bits);
00165   
00171   inline void state_del(Mword const bits);
00172   
00182   inline Mword state_change_safely(Mword const mask, Mword const bits);
00183   
00189   inline Mword state_change(Mword const mask, Mword const bits);
00190   
00198   inline void state_change_dirty(Mword const mask, Mword const bits);
00199   
00204   inline Space * space() const;
00205   
00209   inline Thread_lock * const thread_lock() const;
00210   
00211   inline unsigned short const mcp() const;
00212   
00216   inline Entry_frame * const regs() const;
00217   
00220   inline void inc_lock_cnt();
00221   
00225   inline void dec_lock_cnt();
00226   
00230   inline int const lock_cnt() const;
00231   
00233   
00238   void switch_sched(Sched_context * const next);
00239   
00243   void schedule();
00244   
00248   static inline bool const schedule_in_progress();
00249   
00253   static inline bool const can_preempt_current(Sched_context const * const s);
00254   
00258   static inline Sched_context * const current_sched();
00259   
00264   inline Sched_context * sched_context(unsigned short const id = 0);
00265   
00270   inline Sched_context * sched() const;
00271   
00276   inline Unsigned64 const period() const;
00277   
00282   inline Context::Sched_mode const mode() const;
00283   
00288   inline void set_mode(Context::Sched_mode const mode);
00289   
00294   inline Mword const in_ready_list() const;
00295   
00299   void ready_enqueue();
00300   
00304   inline void ready_dequeue();
00305   
00310   inline Context * const helper() const;
00311   
00312   inline void set_helper(enum Helping_mode const mode);
00313   
00319   inline Context * const donatee() const;
00320   
00321   inline void set_donatee(Context * const donatee);
00322   
00323   inline Mword * const get_kernel_sp() const;
00324   
00325   inline void set_kernel_sp(Mword * const esp);
00326   
00327   inline Fpu_state * fpu_state();
00328   
00333   inline void consume_time(Cpu_time const quantum);
00334   
00339   inline void switch_to(Context *t);
00340   
00346   inline void switch_to_locked(Context *t);
00347   
00351   inline void switch_exec(Context *t, enum Helping_mode mode);
00352   
00362   void switch_exec_locked(Context *t, enum Helping_mode mode);
00363   
00364   GThread_num gthread_calculated();
00365   
00366   inline LThread_num lthread_calculated();
00367   
00368   inline LThread_num task_calculated();
00369   
00370   inline void update_kip_switch_time(Context * t);
00371 
00372 protected:  
00376   static void set_current_sched(Sched_context * const sched);
00377   
00381   static void invalidate_sched();
00382   
00387   inline void set_sched(Sched_context * const sched);
00388   
00393   inline void set_period(Unsigned64 const period);
00394   
00395   inline void switch_gdt_tls();
00396   
00397   inline void load_segments();
00398   
00399   inline void store_segments();
00400 
00401 private:  
00405   inline void update_ready_list();
00406 };
00407 
00408 inline void update_utcb_ptr();
00409 
00410 //
00411 // IMPLEMENTATION includes follow (for use by inline functions)
00412 //
00413 
00414 #include "atomic.h"
00415 #include "cpu_lock.h"
00416 #include "entry_frame.h"
00417 #include "std_macros.h"
00418 
00419 #include "l4_types.h"
00420 
00421 #include <cassert>
00422 
00423 #include "cpu.h"
00424 #include "globals.h"            // current()
00425 #include "lock_guard.h"
00426 #include "thread_state.h"
00427 #include "kip.h"
00428 
00429 #include "fpu.h"
00430 
00431 //
00432 // IMPLEMENTATION of inline functions (and needed classes)
00433 //
00434 
00435 
00436 
00446 inline Context::Context(Thread_lock *thread_lock,
00447                   Space* space,
00448                   unsigned short prio,
00449                   unsigned short mcp,
00450                   Unsigned64 quantum)
00451        : _space     (space),
00452          _helper            (this),
00453          _thread_lock       (thread_lock),
00454          _sched_context     (this, 0, prio, quantum),
00455          _sched             (&_sched_context),
00456          _period            (0),
00457          _mode              (Sched_mode (0)),
00458          _mcp               (mcp),
00459          _consumed_time     (0)
00460 {
00461   // NOTE: We do not have to synchronize the initialization of
00462   // _space because it is constant for all concurrent
00463   // invocations of this constructor.  When two threads concurrently
00464   // try to create a new task, they already synchronize in
00465   // sys_task_new() and avoid calling us twice with different
00466   // space arguments.
00467 
00468   Mword *init_sp = reinterpret_cast<Mword*>
00469     (reinterpret_cast<Mword>(this) + size - sizeof (Entry_frame));
00470 
00471   // don't care about errors: they just mean someone else has already
00472   // set up the tcb
00473 
00474   cas (&_kernel_sp, (Mword *) 0, init_sp);
00475 }
00476 
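The address arithmetic above (and in Context::regs() further down) relies on the convention that each Context sits at the bottom of a thread block of Config::thread_block_size bytes, with the kernel stack growing downwards from the top and the Entry_frame occupying the very top of the block. A minimal stand-alone sketch of that layout, not part of context.h, with a hypothetical 4 KB block size in place of the real Config value:

#include <cstdio>
#include <cstdint>

// Hypothetical stand-ins: sizes are illustrative only, not the kernel's.
struct Toy_entry_frame { std::uintptr_t gp_regs[8]; };
static const std::size_t toy_thread_block_size = 4096;

struct Toy_context
{
  // Same computation as Context::regs(): top of the block minus the frame.
  Toy_entry_frame *regs() const
  {
    return reinterpret_cast<Toy_entry_frame *>
      (reinterpret_cast<std::uintptr_t>(this) + toy_thread_block_size
       - sizeof (Toy_entry_frame));
  }
};

int main()
{
  // Pretend this block-aligned buffer is one kernel thread block.
  alignas (4096) static char block[toy_thread_block_size];
  Toy_context *ctx = reinterpret_cast<Toy_context *>(block);

  std::printf ("context at      %p\n", static_cast<void *>(ctx));
  std::printf ("entry frame at  %p\n", static_cast<void *>(ctx->regs()));
  // The constructor's init_sp is exactly the entry-frame address: the
  // kernel stack starts there and grows down towards the Context object.
  return 0;
}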
00477 
00483 inline Mword
00484 Context::state() const
00485 {
00486   return _state;
00487 }
00488 
00489 
00495 inline Mword
00496 Context::exists() const
00497 {
00498   return _state != Thread_invalid;
00499 }
00500 
00501 
00508 inline void
00509 Context::state_add(Mword const bits)
00510 {
00511   atomic_or (&_state, bits);
00512 }
00513 
00514 
00521 inline void
00522 Context::state_del(Mword const bits)
00523 {
00524   atomic_and (&_state, ~bits);
00525 }
00526 
00527 
00538 inline Mword
00539 Context::state_change_safely(Mword const mask, Mword const bits)
00540 {
00541   Mword old;
00542 
00543   do
00544     {
00545       old = _state;
00546       if ((old & bits & mask) | (~old & ~mask))
00547         return 0;
00548     }
00549   while (!cas (&_state, old, (old & mask) | bits));
00550 
00551   return 1;
00552 }
00553 
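state_change_safely() above is the classic compare-and-swap retry loop: read the state word, give up if the requested transition would not take effect as intended, otherwise try to install the new value and retry if the word changed underneath (e.g. from an interrupt handler). A rough stand-alone equivalent using std::atomic instead of the kernel's cas() helper -- an analogy only, since the original predates the C++ memory model:

#include <atomic>

typedef unsigned long Toy_mword;

// Mirrors the logic of Context::state_change_safely(): 'state' stands in
// for _state, 'mask' selects the bits to keep, 'bits' the bits to set.
bool toy_state_change_safely(std::atomic<Toy_mword> &state,
                             Toy_mword mask, Toy_mword bits)
{
  Toy_mword old = state.load();
  do
    {
      // Fail if a bit to be set is already set, or a bit to be cleared
      // is already clear -- the same test as in the original.
      if ((old & bits & mask) | (~old & ~mask))
        return false;
    }
  // compare_exchange_weak reloads 'old' on failure, so the test above is
  // re-evaluated against the fresh value, just like the original do/while.
  while (!state.compare_exchange_weak (old, (old & mask) | bits));

  return true;
}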
00554 
00561 inline Mword
00562 Context::state_change(Mword const mask, Mword const bits)
00563 {
00564   return atomic_change (&_state, mask, bits);
00565 }
00566 
00567 
00576 inline void
00577 Context::state_change_dirty(Mword const mask, Mword const bits)
00578 {
00579   _state &= mask;
00580   _state |= bits;
00581 }
00582 
00583 
00589 inline Space *
00590 Context::space() const
00591 {
00592   return _space;
00593 }
00594 
00595 
00600 inline Thread_lock * const
00601 Context::thread_lock() const
00602 {
00603   return _thread_lock;
00604 }
00605 
00606 
00607 
00608 inline unsigned short const
00609 Context::mcp() const
00610 {
00611   return _mcp;
00612 }
00613 
00614 
00619 inline Entry_frame * const
00620 Context::regs() const
00621 {
00622   return reinterpret_cast<Entry_frame *>
00623     (reinterpret_cast<Mword>(this) + size - sizeof(Entry_frame));
00624 }
00625 
00626 
00630 inline void
00631 Context::inc_lock_cnt()
00632 {
00633   _lock_cnt++;
00634 }
00635 
00636 
00641 inline void
00642 Context::dec_lock_cnt()
00643 {
00644   _lock_cnt--;
00645 }
00646 
00647 
00652 inline int const
00653 Context::lock_cnt() const
00654 {
00655   return _lock_cnt;
00656 }
00657 
00658 
00663 inline bool const
00664 Context::schedule_in_progress()
00665 {
00666   return _schedule_in_progress;
00667 }
00668 
00669 
00674 inline bool const
00675 Context::can_preempt_current(Sched_context const * const s)
00676 {
00677   // XXX: Workaround for missing priority boost implementation
00678   if (current()->sched()->prio() >= s->prio())
00679     return false;
00680 
00681   return !current_sched() ||
00682           current_sched()->prio() < s->prio() ||
00683           current_sched() == s;
00684 }
00685 
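The three-way test above is easier to read as a decision table: preemption on behalf of Sched_context s is refused outright while the current thread has at least s's priority, and is otherwise allowed when no Sched_context is in charge, when s outranks the one in charge, or when s itself is the one in charge. A toy version with the kernel objects replaced by hypothetical plain parameters:

// 'cur_prio' plays the role of current()->sched()->prio(), 'cur_sched_prio'
// that of current_sched()->prio() with -1 meaning "no current Sched_context",
// and the last two arguments describe the candidate Sched_context s.
bool toy_can_preempt_current(int cur_prio, int cur_sched_prio,
                             int s_prio, bool s_is_current_sched)
{
  // Workaround kept from the original: never preempt a current thread of
  // equal or higher priority.
  if (cur_prio >= s_prio)
    return false;

  return cur_sched_prio < 0            // no current Sched_context set
         || cur_sched_prio < s_prio    // s outranks the current one
         || s_is_current_sched;        // s is the current Sched_context
}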
00686 
00691 inline Sched_context * const
00692 Context::current_sched()
00693 {
00694   return _current_sched;
00695 }
00696 
00697 
00703 inline Sched_context *
00704 Context::sched_context(unsigned short const id)
00705 {
00706   if (EXPECT_TRUE (!id))
00707     return &_sched_context;
00708 
00709   for (Sched_context *tmp = _sched_context.next();
00710       tmp != &_sched_context; tmp = tmp->next())
00711     if (tmp->id() == id)
00712       return tmp;
00713 
00714   return 0;
00715 }
00716 
00717 
00723 inline Sched_context *
00724 Context::sched() const
00725 {
00726   return _sched;
00727 }
00728 
00729 
00735 inline Unsigned64 const
00736 Context::period() const
00737 {
00738   return _period;
00739 }
00740 
00741 
00747 inline Context::Sched_mode const
00748 Context::mode() const
00749 {
00750   return _mode;
00751 }
00752 
00753 
00759 inline void
00760 Context::set_mode(Context::Sched_mode const mode)
00761 {
00762   _mode = mode;
00763 }
00764 
00765 
00771 inline Mword const
00772 Context::in_ready_list() const
00773 {
00774   return _ready_next != 0;
00775 }
00776 
00777 
00782 inline void
00783 Context::ready_dequeue()
00784 {
00785   Lock_guard <Cpu_lock> guard (&cpu_lock);
00786 
00787   // Don't dequeue threads which aren't enqueued
00788   if (EXPECT_FALSE (!in_ready_list()))
00789     return;
00790 
00791   unsigned short prio = sched()->prio();
00792 
00793   if (_prio_next[prio] == this)
00794     _prio_next[prio] = _ready_next == this ? 0 : _ready_next;
00795 
00796   _ready_prev->_ready_next = _ready_next;
00797   _ready_next->_ready_prev = _ready_prev;
00798   _ready_next = 0;                              // Mark dequeued
00799 
00800   while (!_prio_next[_prio_highest] && _prio_highest)
00801     _prio_highest--;
00802 
00803   send_activation (1); // Send block message
00804 }
00805 
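ready_dequeue() above operates on one circular doubly-linked ready list per priority, indexed through the static _prio_next[256] array, with _prio_highest caching the highest non-empty level. ready_enqueue() is not inlined in this file, so the enqueue below is only a plausible counterpart; the whole fragment is a stand-alone toy model (no cpu_lock, no send_activation):

// Toy model of the per-priority ready rings.  A null ready_next pointer
// marks "not enqueued", exactly as in Context.
struct Toy_thread
{
  unsigned short prio = 0;
  Toy_thread *ready_next = 0;
  Toy_thread *ready_prev = 0;
};

static Toy_thread *toy_prio_next[256];   // plays the role of _prio_next[]
static unsigned    toy_prio_highest;     // plays the role of _prio_highest

void toy_ready_enqueue (Toy_thread *t)   // assumed shape, see lead-in
{
  if (t->ready_next)
    return;                              // already enqueued

  unsigned short p = t->prio;
  if (p > toy_prio_highest)
    toy_prio_highest = p;

  if (!toy_prio_next[p])                 // empty level: t forms a ring of one
    toy_prio_next[p] = t->ready_next = t->ready_prev = t;
  else
    {                                    // otherwise insert t before the head
      t->ready_next = toy_prio_next[p];
      t->ready_prev = toy_prio_next[p]->ready_prev;
      t->ready_prev->ready_next = t;
      toy_prio_next[p]->ready_prev = t;
    }
}

void toy_ready_dequeue (Toy_thread *t)   // same steps as ready_dequeue()
{
  if (!t->ready_next)
    return;                              // not enqueued

  unsigned short p = t->prio;
  if (toy_prio_next[p] == t)             // advance the head past t, or empty
    toy_prio_next[p] = t->ready_next == t ? 0 : t->ready_next;

  t->ready_prev->ready_next = t->ready_next;
  t->ready_next->ready_prev = t->ready_prev;
  t->ready_next = 0;                     // mark dequeued

  while (!toy_prio_next[toy_prio_highest] && toy_prio_highest)
    toy_prio_highest--;                  // lower the highest-priority hint
}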
00806 
00812 inline Context * const
00813 Context::helper() const
00814 {
00815   return _helper;
00816 }
00817 
00818 
00819 
00820 inline void
00821 Context::set_helper(enum Helping_mode const mode)
00822 {
00823   switch (mode)
00824     {
00825     case Helping:
00826       _helper = current();
00827       break;
00828     case Not_Helping:
00829       _helper = this;
00830       break;
00831     case Ignore_Helping:
00832       // don't change _helper value
00833       break;
00834     }
00835 }
00836 
00837 
00844 inline Context * const
00845 Context::donatee() const
00846 {
00847   return _donatee;
00848 }
00849 
00850 
00851 
00852 inline void
00853 Context::set_donatee(Context * const donatee)
00854 {
00855   _donatee = donatee;
00856 }
00857 
00858 
00859 
00860 inline Mword * const
00861 Context::get_kernel_sp() const
00862 {
00863   return _kernel_sp;
00864 }
00865 
00866 
00867 
00868 inline void
00869 Context::set_kernel_sp(Mword * const esp)
00870 {
00871   _kernel_sp = esp;
00872 }
00873 
00874 
00875 
00876 inline Fpu_state *
00877 Context::fpu_state()
00878 {
00879   return &_fpu_state;
00880 }
00881 
00882 
00888 inline void
00889 Context::consume_time(Cpu_time const quantum)
00890 {
00891   _consumed_time += quantum;
00892 }
00893 
00894 
00900 inline void
00901 Context::switch_to(Context *t)
00902 {
00903   // Call switch_to_locked if CPU lock is already held
00904   assert (!cpu_lock.test());
00905 
00906   // Grab the CPU lock
00907   Lock_guard <Cpu_lock> guard (&cpu_lock);
00908 
00909   switch_to_locked (t);
00910 }
00911 
00912 
00919 inline void
00920 Context::switch_to_locked(Context *t)
00921 {
00922   // Must be called with CPU lock held
00923   assert (cpu_lock.test());
00924 
00925   // Switch to destination thread's scheduling context
00926   if (current_sched() != t->sched())
00927     set_current_sched (t->sched());
00928 
00929   // XXX: IPC dependency tracking belongs here.
00930 
00931   // Switch to destination thread's execution context, no helping involved
00932   if (t != this)
00933     switch_exec_locked (t, Not_Helping);
00934 }
00935 
00936 
00941 inline void
00942 Context::switch_exec(Context *t, enum Helping_mode mode)
00943 {
00944   // Call switch_exec_locked if CPU lock is already held
00945   assert (!cpu_lock.test());
00946 
00947   // Grab the CPU lock
00948   Lock_guard <Cpu_lock> guard (&cpu_lock);
00949 
00950   switch_exec_locked (t, mode);
00951 }
00952 
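switch_to() and switch_exec() above follow a recurring idiom in this file: the public entry point asserts that the CPU lock is not yet held, takes it through a Lock_guard, and delegates to a *_locked variant that asserts the opposite and does the real work. A stand-alone sketch of that idiom with toy lock classes -- the real cpu_lock disables interrupts, these only keep a flag:

#include <cassert>

struct Toy_cpu_lock                        // stand-in for Cpu_lock
{
  bool held;
  Toy_cpu_lock() : held (false) {}
  void lock()       { held = true; }
  void clear()      { held = false; }
  bool test() const { return held; }
};

template <typename LOCK>
struct Toy_lock_guard                      // stand-in for Lock_guard
{
  LOCK *_lock;
  explicit Toy_lock_guard (LOCK *l) : _lock (l) { _lock->lock(); }
  ~Toy_lock_guard() { _lock->clear(); }
};

static Toy_cpu_lock toy_cpu_lock;

void toy_switch_locked()
{
  assert (toy_cpu_lock.test());            // must run with the lock held
  // ... the actual switching work goes here ...
}

void toy_switch()
{
  assert (!toy_cpu_lock.test());           // holders call toy_switch_locked()
  Toy_lock_guard<Toy_cpu_lock> guard (&toy_cpu_lock);
  toy_switch_locked();
}                                          // guard releases the lock here

int main()
{
  toy_switch();
  return 0;
}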
00953 
00954 
00955 inline LThread_num
00956 Context::lthread_calculated()
00957 {
00958   return gthread_calculated() % L4_uid::threads_per_task();
00959 }
00960 
00961 
00962 
00963 inline LThread_num
00964 Context::task_calculated()
00965 {
00966   return gthread_calculated() / L4_uid::threads_per_task();
00967 }
00968 
00969 
00970 
00971 inline void
00972 Context::update_kip_switch_time(Context * t)
00973 {
00974   if (Config::fine_grained_cputime)
00975     {
00976       Kip::k()->switch_time = _switch_time;
00977       Kip::k()->thread_time = t->_consumed_time;
00978     }
00979 }
00980 
00981 
00987 inline void
00988 Context::set_sched(Sched_context * const sched)
00989 {
00990   _sched = sched;
00991 }
00992 
00993 
00999 inline void
01000 Context::set_period(Unsigned64 const period)
01001 {
01002   _period = period;
01003 }
01004 
01005 
01006 
01007 inline void
01008 Context::switch_gdt_tls()
01009 {}
01010 
01011 
01012 
01013 inline void
01014 Context::load_segments()
01015 {}
01016 
01017 
01018 
01019 inline void
01020 Context::store_segments()
01021 {}
01022 
01023 
01024 
01025 inline Utcb *
01026 Context::utcb() const
01027 {
01028   return 0;
01029 }
01030 
01031 
01032 
01033 inline void
01034 Context::send_activation(unsigned)
01035 {}
01036 
01037 
01038 
01039 inline void
01040 Context::init_switch_time()
01041 {
01042   if (Config::fine_grained_cputime)
01043     _switch_time = Cpu::rdtsc();
01044 }
01045 
01046 
01052 inline void
01053 Context::update_consumed_time()
01054 {
01055   if (Config::fine_grained_cputime)
01056     {
01057       Cpu_time tsc = Cpu::rdtsc();
01058       consume_time (tsc - _switch_time);
01059       _switch_time = tsc;
01060     }
01061 }
01062 
01063 
01064 
01065 inline void
01066 Context::switch_cpu(Context *t)
01067 {
01068   Mword dummy1, dummy2, dummy3;
01069 
01070   update_consumed_time();
01071   update_kip_switch_time(t);
01072 
01073   store_segments();
01074 
01075   asm volatile
01076     (
01077      "   pushl %%ebp                    \n\t"   // save base ptr of old thread
01078      "   pushl $1f                      \n\t"   // restart addr to old stack
01079      "   movl  %%esp, (%0)              \n\t"   // save stack pointer
01080      "   movl  (%1), %%esp              \n\t"   // load new stack pointer
01081                                                 // in new context now (cli'd)
01082      "   movl  %2, %%eax                \n\t"   // new thread's "this"
01083      "   call  switchin_context_label   \n\t"   // switch pagetable
01084      "   popl  %%eax                    \n\t"   // don't do ret here -- we want
01085      "   jmp   *%%eax                   \n\t"   // to preserve the return stack
01086                                                 // restart code
01087      "  .p2align 4                      \n\t"   // start code at new cache line
01088      "1: popl %%ebp                     \n\t"   // restore base ptr
01089 
01090      : "=c" (dummy1), "=S" (dummy2), "=D" (dummy3)
01091      : "c" (&_kernel_sp), "S" (&t->_kernel_sp), "D" (t)
01092      : "eax", "ebx", "edx", "memory");
01093 }
01094 
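The inline assembly above is the heart of the switch: save EBP and a restart address on the old kernel stack, store ESP into the old context, load ESP from the new context, call the new thread's switchin_context() on the new stack, then pop and jump to whatever restart address the new stack's owner pushed when it was last switched away from. For readers who want to experiment with the save-SP/restore-SP-and-resume idea outside the kernel, here is a user-space analogue built on the POSIX <ucontext.h> API; it is only an analogy, not what Fiasco does:

#include <ucontext.h>
#include <cstdio>

static ucontext_t main_ctx, worker_ctx;
static char worker_stack[64 * 1024];       // the worker's private stack

static void worker()
{
  std::puts ("worker: running on its own stack");
  swapcontext (&worker_ctx, &main_ctx);    // save SP/IP, resume main
  std::puts ("worker: resumed right after its own swapcontext");
}

int main()
{
  getcontext (&worker_ctx);                // initialize, then redirect it
  worker_ctx.uc_stack.ss_sp   = worker_stack;
  worker_ctx.uc_stack.ss_size = sizeof (worker_stack);
  worker_ctx.uc_link          = &main_ctx; // where to go when worker returns
  makecontext (&worker_ctx, worker, 0);

  swapcontext (&main_ctx, &worker_ctx);    // switch stacks, like switch_cpu()
  std::puts ("main: worker yielded");
  swapcontext (&main_ctx, &worker_ctx);    // resume worker at its saved point
  std::puts ("main: worker finished");
  return 0;
}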
01095 
01096 
01097 inline Cpu_time
01098 Context::consumed_time()
01099 {
01100   // When using fine-grained CPU time, this is not usecs but TSC ticks
01101   if (Config::fine_grained_cputime)
01102     return Cpu::tsc_to_us (_consumed_time);
01103 
01104   return _consumed_time;
01105 }
01106 
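With Config::fine_grained_cputime enabled, _consumed_time accumulates raw TSC ticks: update_consumed_time() charges the slice since the last switch to the outgoing thread and restarts the stopwatch, and consumed_time() converts to microseconds only when the value is read. A stand-alone sketch of that accounting scheme with std::chrono standing in for the TSC, so the tick-to-microsecond conversion is a plain duration_cast here:

#include <chrono>
#include <cstdio>

typedef std::chrono::steady_clock Toy_clock;

struct Toy_context
{
  Toy_clock::duration consumed;            // plays the role of _consumed_time
  Toy_context() : consumed (Toy_clock::duration::zero()) {}
};

static Toy_clock::time_point toy_switch_time;  // plays the role of _switch_time

void toy_init_switch_time()                // start the stopwatch once
{
  toy_switch_time = Toy_clock::now();
}

void toy_update_consumed_time (Toy_context &outgoing)   // at every switch
{
  Toy_clock::time_point now = Toy_clock::now();
  outgoing.consumed += now - toy_switch_time;   // charge the slice just ended
  toy_switch_time = now;                        // restart the stopwatch
}

long long toy_consumed_us (Toy_context const &c)        // convert on read only
{
  return std::chrono::duration_cast<std::chrono::microseconds>
           (c.consumed).count();
}

int main()
{
  Toy_context thread;
  toy_init_switch_time();
  for (volatile long i = 0; i < 1000000; ++i)   // pretend to do some work
    ;
  toy_update_consumed_time (thread);            // "switch away" from thread
  std::printf ("consumed: %lld us\n", toy_consumed_us (thread));
  return 0;
}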
01107 
01115 inline void
01116 Context::switch_fpu(Context *t)
01117 {
01118   if (Fpu::is_owner(this))
01119     Fpu::disable();
01120 
01121   else if (Fpu::is_owner(t))
01122     Fpu::enable();
01123 }
01124 
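switch_fpu() above implements lazy FPU switching: the FPU registers are not saved and restored on every context switch; instead the FPU is merely disabled when its current owner is switched away from, so that the next thread to actually touch the FPU traps, and only that trap handler saves the owner's state, loads the new state and transfers ownership. A toy model of that ownership protocol, with the hardware enable bit and the trap both simulated; 'from' plays the role of 'this' in the original:

#include <cstdio>

struct Toy_thread { int id; };

static Toy_thread *toy_fpu_owner   = 0;    // who owns the FPU register file
static bool        toy_fpu_enabled = false;// simulated "FPU usable" bit

void toy_switch_fpu (Toy_thread *from, Toy_thread *to)
{
  if (toy_fpu_owner == from)
    toy_fpu_enabled = false;               // owner leaves: next use must trap
  else if (toy_fpu_owner == to)
    toy_fpu_enabled = true;                // owner returns: no trap needed
}

void toy_use_fpu (Toy_thread *self)        // a thread touching the FPU
{
  if (!toy_fpu_enabled)                    // simulated "FPU unavailable" trap
    {
      std::printf ("trap: save state of %d, load state of %d\n",
                   toy_fpu_owner ? toy_fpu_owner->id : -1, self->id);
      toy_fpu_owner   = self;
      toy_fpu_enabled = true;
    }
  // ... actual FPU work would happen here ...
}

int main()
{
  Toy_thread a = { 1 }, b = { 2 };
  toy_use_fpu (&a);                        // a takes ownership via the trap
  toy_switch_fpu (&a, &b);                 // switching away from the owner
  toy_use_fpu (&b);                        // b traps and takes over
  toy_switch_fpu (&b, &a);
  toy_use_fpu (&a);                        // a traps and gets its state back
  return 0;
}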
01125 
01126 
01127 inline void
01128 update_utcb_ptr() 
01129 {}
01130 
01131 #endif // context_h
