/*
 *  linux/kernel/timer.c
 *
 *  Kernel internal timers, basic process system calls
 *
 *  Copyright (C) 1991, 1992  Linus Torvalds
 */

#include <linux/kernel_stat.h>
#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/percpu.h>
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/pid_namespace.h>
#include <linux/notifier.h>
#include <linux/thread_info.h>
#include <linux/time.h>
#include <linux/jiffies.h>
#include <linux/posix-timers.h>
#include <linux/cpu.h>
#include <linux/syscalls.h>
#include <linux/delay.h>
#include <linux/tick.h>
#include <linux/kallsyms.h>

#include <asm/uaccess.h>
#include <asm/unistd.h>
#include <asm/div64.h>
#include <asm/timex.h>
#include <asm/io.h>

#ifndef DDE_LINUX

u64 jiffies_64 __cacheline_aligned_in_smp = INITIAL_JIFFIES;

EXPORT_SYMBOL(jiffies_64);

/*
 * per-CPU timer vector definitions:
 */
#define TVN_BITS (CONFIG_BASE_SMALL ? 4 : 6)
#define TVR_BITS (CONFIG_BASE_SMALL ? 6 : 8)
#define TVN_SIZE (1 << TVN_BITS)
#define TVR_SIZE (1 << TVR_BITS)
#define TVN_MASK (TVN_SIZE - 1)
#define TVR_MASK (TVR_SIZE - 1)

struct tvec {
	struct list_head vec[TVN_SIZE];
};

struct tvec_root {
	struct list_head vec[TVR_SIZE];
};

struct tvec_base {
	spinlock_t lock;
	struct timer_list *running_timer;
	unsigned long timer_jiffies;
	struct tvec_root tv1;
	struct tvec tv2;
	struct tvec tv3;
	struct tvec tv4;
	struct tvec tv5;
} ____cacheline_aligned;
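
/*
 * A worked example of how the five vectors above partition future expiry
 * times (with CONFIG_BASE_SMALL=0, i.e. TVR_BITS=8, TVN_BITS=6):
 *
 *	tv1: 2^8 = 256 slots, one jiffy apart	-> deltas [0, 2^8)
 *	tv2: 2^6 = 64 slots, 2^8 jiffies apart	-> deltas [2^8, 2^14)
 *	tv3: 64 slots, 2^14 jiffies apart	-> deltas [2^14, 2^20)
 *	tv4: 64 slots, 2^20 jiffies apart	-> deltas [2^20, 2^26)
 *	tv5: 64 slots, 2^26 jiffies apart	-> deltas [2^26, 2^32)
 *
 * E.g. with HZ=250 (an assumed value, for illustration) a timer ten
 * minutes out (150000 jiffies) satisfies 2^14 <= 150000 < 2^20 and so
 * lands in tv3; it cascades down into tv2 and finally tv1 as its
 * expiry approaches.
 */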

struct tvec_base boot_tvec_bases;
EXPORT_SYMBOL(boot_tvec_bases);
static DEFINE_PER_CPU(struct tvec_base *, tvec_bases) = &boot_tvec_bases;

/*
 * Note that all tvec_base's are at least 2 byte aligned, so the lower
 * bit of a timer's base pointer is guaranteed to be zero. That bit is
 * borrowed as a flag to mark the timer as deferrable: a deferrable
 * timer works normally while the system is busy, but will not cause an
 * idle CPU to be woken up just to service it.
 */
#define TBASE_DEFERRABLE_FLAG		(0x1)

/* Helpers to manage the 'deferrable' flag stored in timer->base */
static inline unsigned int tbase_get_deferrable(struct tvec_base *base)
{
	return ((unsigned int)(unsigned long)base & TBASE_DEFERRABLE_FLAG);
}

static inline struct tvec_base *tbase_get_base(struct tvec_base *base)
{
	return ((struct tvec_base *)((unsigned long)base & ~TBASE_DEFERRABLE_FLAG));
}

static inline void timer_set_deferrable(struct timer_list *timer)
{
	timer->base = ((struct tvec_base *)((unsigned long)(timer->base) |
				       TBASE_DEFERRABLE_FLAG));
}

static inline void
timer_set_base(struct timer_list *timer, struct tvec_base *new_base)
{
	timer->base = (struct tvec_base *)((unsigned long)(new_base) |
				      tbase_get_deferrable(timer->base));
}
#endif /* !DDE_LINUX */

static unsigned long round_jiffies_common(unsigned long j, int cpu,
		bool force_up)
{
	int rem;
	unsigned long original = j;

	/*
	 * We don't want all cpus firing their timers at once hitting the
	 * same lock or cachelines, so we skew each extra cpu with an extra
	 * 3 jiffies. This 3 jiffies came originally from the mm/ code which
	 * even now assumes a start-of-second, instead of an end-of-second
	 * interval, i.e. the .98 second vs 1.02 second way.
	 *
	 * The skew is done by adding 3*cpunr, then rounding, then
	 * subtracting this extra offset again.
	 */
	j += cpu * 3;

	rem = j % HZ;

	/*
	 * If the target jiffy is just after a whole second (which can
	 * happen due to delays of the timer irq, long irq off times etc
	 * etc) then we should round down to the whole second, not up.
	 * Use 1/4th second as cutoff for this rounding as an extreme
	 * upper bound for this. But never round down if @force_up is set.
	 */
	if (rem < HZ/4 && !force_up)	/* round down */
		j = j - rem;
	else				/* round up */
		j = j - rem + HZ;

	/* now that we have rounded, subtract the extra skew again */
	j -= cpu * 3;

	if (j <= jiffies)		/* rounding ate our timeout entirely */
		return original;
	return j;
}
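
/*
 * A short worked example of the rounding above (values assumed purely
 * for illustration): with HZ=100, cpu=1 and j=1570 we get j += 3 ->
 * 1573, rem = 73 >= HZ/4, so j = 1573 - 73 + 100 = 1600; subtracting
 * the skew again yields 1597. All rounded timers on cpu 1 therefore
 * fire 3 jiffies before the full second while cpu 0 fires exactly on
 * it, so CPUs wake in one burst per second without contending on the
 * same locks and cachelines at the very same tick.
 */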

/**
 * __round_jiffies - function to round jiffies to a full second
 * @j: the time in (absolute) jiffies that should be rounded
 * @cpu: the processor number on which the timeout will happen
 *
 * __round_jiffies() rounds an absolute time in the future (in jiffies)
 * up or down to (approximately) full seconds. This is useful for timers
 * for which the exact time they fire does not matter too much, as long as
 * they fire approximately every X seconds.
 *
 * By rounding these timers to whole seconds, all such timers will fire
 * at the same time, rather than at various times spread out. The goal
 * of this is to have the CPU wake up less, which saves power.
 *
 * The exact rounding is skewed for each processor to avoid all
 * processors firing at the exact same time, which could lead
 * to lock contention or spurious cache line bouncing.
 *
 * The return value is the rounded version of the @j parameter.
 */
unsigned long __round_jiffies(unsigned long j, int cpu)
{
	return round_jiffies_common(j, cpu, false);
}
EXPORT_SYMBOL_GPL(__round_jiffies);

/**
 * __round_jiffies_relative - function to round jiffies to a full second
 * @j: the time in (relative) jiffies that should be rounded
 * @cpu: the processor number on which the timeout will happen
 *
 * __round_jiffies_relative() rounds a time delta in the future (in jiffies)
 * up or down to (approximately) full seconds. This is useful for timers
 * for which the exact time they fire does not matter too much, as long as
 * they fire approximately every X seconds.
 *
 * By rounding these timers to whole seconds, all such timers will fire
 * at the same time, rather than at various times spread out. The goal
 * of this is to have the CPU wake up less, which saves power.
 *
 * The exact rounding is skewed for each processor to avoid all
 * processors firing at the exact same time, which could lead
 * to lock contention or spurious cache line bouncing.
 *
 * The return value is the rounded version of the @j parameter.
 */
unsigned long __round_jiffies_relative(unsigned long j, int cpu)
{
	unsigned long j0 = jiffies;

	/* Use j0 because jiffies might change while we run */
	return round_jiffies_common(j + j0, cpu, false) - j0;
}
EXPORT_SYMBOL_GPL(__round_jiffies_relative);

/**
 * round_jiffies - function to round jiffies to a full second
 * @j: the time in (absolute) jiffies that should be rounded
 *
 * round_jiffies() rounds an absolute time in the future (in jiffies)
 * up or down to (approximately) full seconds, like __round_jiffies()
 * but for the local CPU.
 *
 * By rounding these timers to whole seconds, all such timers will fire
 * at the same time, rather than at various times spread out. The goal
 * of this is to have the CPU wake up less, which saves power.
 *
 * The return value is the rounded version of the @j parameter.
 */
unsigned long round_jiffies(unsigned long j)
{
	return round_jiffies_common(j, raw_smp_processor_id(), false);
}
EXPORT_SYMBOL_GPL(round_jiffies);

/**
 * round_jiffies_relative - function to round jiffies to a full second
 * @j: the time in (relative) jiffies that should be rounded
 *
 * round_jiffies_relative() rounds a time delta in the future (in
 * jiffies) up or down to (approximately) full seconds, like
 * __round_jiffies_relative() but for the local CPU.
 *
 * By rounding these timers to whole seconds, all such timers will fire
 * at the same time, rather than at various times spread out. The goal
 * of this is to have the CPU wake up less, which saves power.
 *
 * The return value is the rounded version of the @j parameter.
 */
unsigned long round_jiffies_relative(unsigned long j)
{
	return __round_jiffies_relative(j, raw_smp_processor_id());
}
EXPORT_SYMBOL_GPL(round_jiffies_relative);

/**
 * __round_jiffies_up - function to round jiffies up to a full second
 * @j: the time in (absolute) jiffies that should be rounded
 * @cpu: the processor number on which the timeout will happen
 *
 * This is the same as __round_jiffies() except that it will never
 * round down. This is useful for timeouts for which the exact time
 * of firing does not matter too much, as long as they don't fire too
 * early.
 */
unsigned long __round_jiffies_up(unsigned long j, int cpu)
{
	return round_jiffies_common(j, cpu, true);
}
EXPORT_SYMBOL_GPL(__round_jiffies_up);

/**
 * __round_jiffies_up_relative - function to round jiffies up to a full second
 * @j: the time in (relative) jiffies that should be rounded
 * @cpu: the processor number on which the timeout will happen
 *
 * This is the same as __round_jiffies_relative() except that it will never
 * round down. This is useful for timeouts for which the exact time
 * of firing does not matter too much, as long as they don't fire too
 * early.
 */
unsigned long __round_jiffies_up_relative(unsigned long j, int cpu)
{
	unsigned long j0 = jiffies;

	/* Use j0 because jiffies might change while we run */
	return round_jiffies_common(j + j0, cpu, true) - j0;
}
EXPORT_SYMBOL_GPL(__round_jiffies_up_relative);

/**
 * round_jiffies_up - function to round jiffies up to a full second
 * @j: the time in (absolute) jiffies that should be rounded
 *
 * This is the same as round_jiffies() except that it will never
 * round down. This is useful for timeouts for which the exact time
 * of firing does not matter too much, as long as they don't fire too
 * early.
 */
unsigned long round_jiffies_up(unsigned long j)
{
	return round_jiffies_common(j, raw_smp_processor_id(), true);
}
EXPORT_SYMBOL_GPL(round_jiffies_up);

/**
 * round_jiffies_up_relative - function to round jiffies up to a full second
 * @j: the time in (relative) jiffies that should be rounded
 *
 * This is the same as round_jiffies_relative() except that it will never
 * round down. This is useful for timeouts for which the exact time
 * of firing does not matter too much, as long as they don't fire too
 * early.
 */
unsigned long round_jiffies_up_relative(unsigned long j)
{
	return __round_jiffies_up_relative(j, raw_smp_processor_id());
}
EXPORT_SYMBOL_GPL(round_jiffies_up_relative);

#ifndef DDE_LINUX
static inline void set_running_timer(struct tvec_base *base,
					struct timer_list *timer)
{
#ifdef CONFIG_SMP
	base->running_timer = timer;
#endif
}

static void internal_add_timer(struct tvec_base *base, struct timer_list *timer)
{
	unsigned long expires = timer->expires;
	unsigned long idx = expires - base->timer_jiffies;
	struct list_head *vec;

	if (idx < TVR_SIZE) {
		int i = expires & TVR_MASK;
		vec = base->tv1.vec + i;
	} else if (idx < 1 << (TVR_BITS + TVN_BITS)) {
		int i = (expires >> TVR_BITS) & TVN_MASK;
		vec = base->tv2.vec + i;
	} else if (idx < 1 << (TVR_BITS + 2 * TVN_BITS)) {
		int i = (expires >> (TVR_BITS + TVN_BITS)) & TVN_MASK;
		vec = base->tv3.vec + i;
	} else if (idx < 1 << (TVR_BITS + 3 * TVN_BITS)) {
		int i = (expires >> (TVR_BITS + 2 * TVN_BITS)) & TVN_MASK;
		vec = base->tv4.vec + i;
	} else if ((signed long) idx < 0) {
		/*
		 * Can happen if you add a timer with expires == jiffies,
		 * or you set a timer to go off in the past
		 */
		vec = base->tv1.vec + (base->timer_jiffies & TVR_MASK);
	} else {
		int i;
		/* If the timeout is larger than 0xffffffff on 64-bit
		 * architectures then we use the maximum timeout:
		 */
		if (idx > 0xffffffffUL) {
			idx = 0xffffffffUL;
			expires = idx + base->timer_jiffies;
		}
		i = (expires >> (TVR_BITS + 3 * TVN_BITS)) & TVN_MASK;
		vec = base->tv5.vec + i;
	}
	/*
	 * Timers are FIFO:
	 */
	list_add_tail(&timer->entry, vec);
}
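
/*
 * Bucket selection above, traced on assumed example values (TVR_BITS=8,
 * TVN_BITS=6): with base->timer_jiffies = 1000 and timer->expires = 1300,
 * idx = 300, which is >= TVR_SIZE (256) but < 1 << 14, so the timer is
 * filed in tv2 at slot (1300 >> 8) & 63 = 5. Note that the slot index is
 * computed from the absolute expiry time, not from the delta, so a later
 * cascade() re-files the timer into a consistent lower-level slot.
 */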

#ifdef CONFIG_TIMER_STATS
void __timer_stats_timer_set_start_info(struct timer_list *timer, void *addr)
{
	if (timer->start_site)
		return;

	timer->start_site = addr;
	memcpy(timer->start_comm, current->comm, TASK_COMM_LEN);
	timer->start_pid = current->pid;
}

static void timer_stats_account_timer(struct timer_list *timer)
{
	unsigned int flag = 0;

	if (unlikely(tbase_get_deferrable(timer->base)))
		flag |= TIMER_STATS_FLAG_DEFERRABLE;

	timer_stats_update_stats(timer, timer->start_pid, timer->start_site,
				 timer->function, timer->start_comm, flag);
}

#else
static void timer_stats_account_timer(struct timer_list *timer) {}
#endif

#ifdef CONFIG_DEBUG_OBJECTS_TIMERS

static struct debug_obj_descr timer_debug_descr;

/*
 * fixup_init is called when:
 * - an active object is initialized
 */
static int timer_fixup_init(void *addr, enum debug_obj_state state)
{
	struct timer_list *timer = addr;

	switch (state) {
	case ODEBUG_STATE_ACTIVE:
		del_timer_sync(timer);
		debug_object_init(timer, &timer_debug_descr);
		return 1;
	default:
		return 0;
	}
}

/*
 * fixup_activate is called when:
 * - an active object is activated
 * - an unknown object is activated (might be a statically initialized object)
 */
static int timer_fixup_activate(void *addr, enum debug_obj_state state)
{
	struct timer_list *timer = addr;

	switch (state) {

	case ODEBUG_STATE_NOTAVAILABLE:
		/*
		 * This is not really a fixup. The timer was
		 * statically initialized. We just make sure that it
		 * is tracked in the object tracker.
		 */
		if (timer->entry.next == NULL &&
		    timer->entry.prev == TIMER_ENTRY_STATIC) {
			debug_object_init(timer, &timer_debug_descr);
			debug_object_activate(timer, &timer_debug_descr);
			return 0;
		} else {
			WARN_ON_ONCE(1);
		}
		return 0;

	case ODEBUG_STATE_ACTIVE:
		WARN_ON(1);
		/* fall through */
	default:
		return 0;
	}
}

/*
 * fixup_free is called when:
 * - an active object is freed
 */
static int timer_fixup_free(void *addr, enum debug_obj_state state)
{
	struct timer_list *timer = addr;

	switch (state) {
	case ODEBUG_STATE_ACTIVE:
		del_timer_sync(timer);
		debug_object_free(timer, &timer_debug_descr);
		return 1;
	default:
		return 0;
	}
}

static struct debug_obj_descr timer_debug_descr = {
	.name		= "timer_list",
	.fixup_init	= timer_fixup_init,
	.fixup_activate	= timer_fixup_activate,
	.fixup_free	= timer_fixup_free,
};

static inline void debug_timer_init(struct timer_list *timer)
{
	debug_object_init(timer, &timer_debug_descr);
}

static inline void debug_timer_activate(struct timer_list *timer)
{
	debug_object_activate(timer, &timer_debug_descr);
}

static inline void debug_timer_deactivate(struct timer_list *timer)
{
	debug_object_deactivate(timer, &timer_debug_descr);
}

static inline void debug_timer_free(struct timer_list *timer)
{
	debug_object_free(timer, &timer_debug_descr);
}

static void __init_timer(struct timer_list *timer);

void init_timer_on_stack(struct timer_list *timer)
{
	debug_object_init_on_stack(timer, &timer_debug_descr);
	__init_timer(timer);
}
EXPORT_SYMBOL_GPL(init_timer_on_stack);

void destroy_timer_on_stack(struct timer_list *timer)
{
	debug_object_free(timer, &timer_debug_descr);
}
EXPORT_SYMBOL_GPL(destroy_timer_on_stack);

#else
static inline void debug_timer_init(struct timer_list *timer) { }
static inline void debug_timer_activate(struct timer_list *timer) { }
static inline void debug_timer_deactivate(struct timer_list *timer) { }
#endif

static void __init_timer(struct timer_list *timer)
{
	timer->entry.next = NULL;
	timer->base = __raw_get_cpu_var(tvec_bases);
#ifdef CONFIG_TIMER_STATS
	timer->start_site = NULL;
	timer->start_pid = -1;
	memset(timer->start_comm, 0, TASK_COMM_LEN);
#endif
}

/**
 * init_timer - initialize a timer.
 * @timer: the timer to be initialized
 *
 * init_timer() must be done to a timer prior calling *any* of the
 * other timer functions.
 */
void init_timer(struct timer_list *timer)
{
	debug_timer_init(timer);
	__init_timer(timer);
}
EXPORT_SYMBOL(init_timer);

void init_timer_deferrable(struct timer_list *timer)
{
	init_timer(timer);
	timer_set_deferrable(timer);
}
EXPORT_SYMBOL(init_timer_deferrable);

static inline void detach_timer(struct timer_list *timer,
				int clear_pending)
{
	struct list_head *entry = &timer->entry;

	debug_timer_deactivate(timer);

	__list_del(entry->prev, entry->next);
	if (clear_pending)
		entry->next = NULL;
	entry->prev = LIST_POISON2;
}

/*
 * We are using hashed locking: holding per_cpu(tvec_bases).lock
 * means that all timers which are tied to this base via timer->base are
 * locked, and the base itself is locked too.
 *
 * So __run_timers/migrate_timers can safely modify all timers which could
 * be found on ->tvX lists.
 *
 * When the timer's base is locked, and the timer removed from list, it is
 * possible to set timer->base = NULL and drop the lock: the timer remains
 * locked.
 */
static struct tvec_base *lock_timer_base(struct timer_list *timer,
					unsigned long *flags)
	__acquires(timer->base->lock)
{
	struct tvec_base *base;

	for (;;) {
		struct tvec_base *prelock_base = timer->base;
		base = tbase_get_base(prelock_base);
		if (likely(base != NULL)) {
			spin_lock_irqsave(&base->lock, *flags);
			if (likely(prelock_base == timer->base))
				return base;
			/* The timer has migrated to another CPU */
			spin_unlock_irqrestore(&base->lock, *flags);
		}
		cpu_relax();
	}
}

int __mod_timer(struct timer_list *timer, unsigned long expires)
{
	struct tvec_base *base, *new_base;
	unsigned long flags;
	int ret = 0;

	timer_stats_timer_set_start_info(timer);
	BUG_ON(!timer->function);

	base = lock_timer_base(timer, &flags);

	if (timer_pending(timer)) {
		detach_timer(timer, 0);
		ret = 1;
	}

	debug_timer_activate(timer);

	new_base = __get_cpu_var(tvec_bases);

	if (base != new_base) {
		/*
		 * We are trying to schedule the timer on the local CPU.
		 * However we can't change timer's base while it is running,
		 * otherwise del_timer_sync() can't detect that the timer's
		 * handler yet has not finished. This also guarantees that
		 * the timer is serialized wrt itself.
		 */
		if (likely(base->running_timer != timer)) {
			/* See the comment in lock_timer_base() */
			timer_set_base(timer, NULL);
			spin_unlock(&base->lock);
			base = new_base;
			spin_lock(&base->lock);
			timer_set_base(timer, base);
		}
	}

	timer->expires = expires;
	internal_add_timer(base, timer);
	spin_unlock_irqrestore(&base->lock, flags);

	return ret;
}

EXPORT_SYMBOL(__mod_timer);

/**
 * add_timer_on - start a timer on a particular CPU
 * @timer: the timer to be added
 * @cpu: the CPU to start it on
 *
 * This is not very scalable on SMP. Double adds are not possible.
 */
void add_timer_on(struct timer_list *timer, int cpu)
{
	struct tvec_base *base = per_cpu(tvec_bases, cpu);
	unsigned long flags;

	timer_stats_timer_set_start_info(timer);
	BUG_ON(timer_pending(timer) || !timer->function);
	spin_lock_irqsave(&base->lock, flags);
	timer_set_base(timer, base);
	debug_timer_activate(timer);
	internal_add_timer(base, timer);
	/*
	 * Check whether the other CPU is idle and needs to be
	 * triggered to reevaluate the timer wheel when nohz is
	 * active. We are protected against the other CPU fiddling
	 * with the timer by holding the timer base lock. This also
	 * makes sure that a CPU on the way to idle can not evaluate
	 * the timer wheel.
	 */
	wake_up_idle_cpu(cpu);
	spin_unlock_irqrestore(&base->lock, flags);
}
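
/*
 * Example (hypothetical caller, for illustration only; "my_watchdog"
 * and "watchdog_fn" are not defined in this file): pinning a periodic
 * watchdog timer to CPU 0 so its handler always runs there.
 *
 *	static struct timer_list my_watchdog;
 *
 *	init_timer(&my_watchdog);
 *	my_watchdog.function = watchdog_fn;
 *	my_watchdog.expires  = jiffies + HZ;
 *	add_timer_on(&my_watchdog, 0);
 *
 * The timer must not already be pending (double adds trigger the
 * BUG_ON above), and a handler that re-arms itself must call
 * add_timer_on() again if it needs to stay on that CPU: plain
 * mod_timer() would migrate the timer to the base of whichever CPU
 * calls it.
 */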

/**
 * mod_timer - modify a timer's timeout
 * @timer: the timer to be modified
 * @expires: new timeout in jiffies
 *
 * mod_timer() is a more efficient way to update the expire field of an
 * active timer (if the timer is inactive it will be activated)
 *
 * mod_timer(timer, expires) is equivalent to:
 *
 *     del_timer(timer); timer->expires = expires; add_timer(timer);
 *
 * Note that if there are multiple unserialized concurrent users of the
 * same timer, then mod_timer() is the only safe way to modify the timeout,
 * since add_timer() cannot modify an already running timer.
 *
 * The function returns whether it has modified a pending timer or not.
 * (ie. mod_timer() of an inactive timer returns 0, mod_timer() of an
 * active timer returns 1.)
 */
int mod_timer(struct timer_list *timer, unsigned long expires)
{
	BUG_ON(!timer->function);

	timer_stats_timer_set_start_info(timer);
	/*
	 * This is a common optimization triggered by the
	 * networking code - if the timer is re-modified
	 * to be the same thing then just return:
	 */
	if (timer->expires == expires && timer_pending(timer))
		return 1;

	return __mod_timer(timer, expires);
}

EXPORT_SYMBOL(mod_timer);
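
/*
 * Typical usage (hypothetical driver code, for illustration only;
 * "my_timer", "my_timeout_fn" and "dev" are not defined in this file):
 *
 *	setup_timer(&my_timer, my_timeout_fn, (unsigned long)dev);
 *	mod_timer(&my_timer, jiffies + msecs_to_jiffies(100));
 *
 * mod_timer() both arms an inactive timer and re-arms a pending one,
 * so it is the usual one-stop interface; round_jiffies() can be
 * applied to the expiry first when sub-second precision is not needed.
 */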

/**
 * del_timer - deactivate a timer.
 * @timer: the timer to be deactivated
 *
 * del_timer() deactivates a timer - this works on both active and inactive
 * timers.
 *
 * The function returns whether it has deactivated a pending timer or not.
 * (ie. del_timer() of an inactive timer returns 0, del_timer() of an
 * active timer returns 1.)
 */
int del_timer(struct timer_list *timer)
{
	struct tvec_base *base;
	unsigned long flags;
	int ret = 0;

	timer_stats_timer_clear_start_info(timer);
	if (timer_pending(timer)) {
		base = lock_timer_base(timer, &flags);
		if (timer_pending(timer)) {
			detach_timer(timer, 1);
			ret = 1;
		}
		spin_unlock_irqrestore(&base->lock, flags);
	}

	return ret;
}

EXPORT_SYMBOL(del_timer);

#ifdef CONFIG_SMP
/**
 * try_to_del_timer_sync - Try to deactivate a timer
 * @timer: timer to deactivate
 *
 * This function tries to deactivate a timer. Upon successful (ret >= 0)
 * exit the timer is not queued and the handler is not running on any CPU.
 *
 * It must not be called from interrupt contexts.
 */
int try_to_del_timer_sync(struct timer_list *timer)
{
	struct tvec_base *base;
	unsigned long flags;
	int ret = -1;

	base = lock_timer_base(timer, &flags);

	if (base->running_timer == timer)
		goto out;

	ret = 0;
	if (timer_pending(timer)) {
		detach_timer(timer, 1);
		ret = 1;
	}
out:
	spin_unlock_irqrestore(&base->lock, flags);

	return ret;
}

EXPORT_SYMBOL(try_to_del_timer_sync);

/**
 * del_timer_sync - deactivate a timer and wait for the handler to finish.
 * @timer: the timer to be deactivated
 *
 * This function only differs from del_timer() on SMP: besides deactivating
 * the timer it also makes sure the handler has finished executing on other
 * CPUs.
 *
 * Synchronization rules: Callers must prevent restarting of the timer,
 * otherwise this function is meaningless. It must not be called from
 * interrupt contexts. The caller must not hold locks which would prevent
 * completion of the timer's handler. The timer's handler must not call
 * add_timer_on(). Upon exit the timer is not queued and the handler is
 * not running on any CPU.
 *
 * The function returns whether it has deactivated a pending timer or not.
 */
int del_timer_sync(struct timer_list *timer)
{
	for (;;) {
		int ret = try_to_del_timer_sync(timer);
		if (ret >= 0)
			return ret;
		cpu_relax();
	}
}

EXPORT_SYMBOL(del_timer_sync);
#endif
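
/*
 * Usage note, with a hypothetical example (not part of this file):
 * del_timer_sync() is the right call on teardown paths, e.g.
 *
 *	static void my_dev_remove(struct my_dev *dev)
 *	{
 *		dev->shutting_down = 1;		(keeps the handler from re-arming)
 *		del_timer_sync(&dev->poll_timer);
 *		kfree(dev);
 *	}
 *
 * Plain del_timer() could return while the handler is still running on
 * another CPU and touching *dev. Because del_timer_sync() spins until
 * the handler has finished, it must never be called while holding a
 * lock that the handler itself takes.
 */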

static int cascade(struct tvec_base *base, struct tvec *tv, int index)
{
	/* cascade all the timers from tv up one level */
	struct timer_list *timer, *tmp;
	struct list_head tv_list;

	list_replace_init(tv->vec + index, &tv_list);

	/*
	 * We are removing _all_ timers from the list, so we
	 * don't have to detach them individually.
	 */
	list_for_each_entry_safe(timer, tmp, &tv_list, entry) {
		BUG_ON(tbase_get_base(timer->base) != base);
		internal_add_timer(base, timer);
	}

	return index;
}

#define INDEX(N) ((base->timer_jiffies >> (TVR_BITS + (N) * TVN_BITS)) & TVN_MASK)

/**
 * __run_timers - run all expired timers (if any) on this CPU.
 * @base: the timer vector to be processed.
 *
 * This function cascades all vectors and executes all expired timer
 * vectors.
 */
static inline void __run_timers(struct tvec_base *base)
{
	struct timer_list *timer;

	spin_lock_irq(&base->lock);
	while (time_after_eq(jiffies, base->timer_jiffies)) {
		struct list_head work_list;
		struct list_head *head = &work_list;
		int index = base->timer_jiffies & TVR_MASK;

		/*
		 * Cascade timers:
		 */
		if (!index &&
			(!cascade(base, &base->tv2, INDEX(0))) &&
				(!cascade(base, &base->tv3, INDEX(1))) &&
					!cascade(base, &base->tv4, INDEX(2)))
			cascade(base, &base->tv5, INDEX(3));
		++base->timer_jiffies;
		list_replace_init(base->tv1.vec + index, &work_list);
		while (!list_empty(head)) {
			void (*fn)(unsigned long);
			unsigned long data;

			timer = list_first_entry(head, struct timer_list, entry);
			fn = timer->function;
			data = timer->data;

			timer_stats_account_timer(timer);

			set_running_timer(base, timer);
			detach_timer(timer, 1);
			spin_unlock_irq(&base->lock);
			{
				int preempt_count = preempt_count();
				fn(data);
				if (preempt_count != preempt_count()) {
					printk(KERN_ERR "huh, entered %p "
					       "with preempt_count %08x, exited"
					       " with %08x?\n",
					       fn, preempt_count,
					       preempt_count());
					BUG();
				}
			}
			spin_lock_irq(&base->lock);
		}
	}
	set_running_timer(base, NULL);
	spin_unlock_irq(&base->lock);
}

#ifdef CONFIG_NO_HZ
/*
 * Find out when the next timer event is due to happen. This
 * is used on S/390 to stop all activity when a CPU is idle.
 * This function needs to be called with interrupts disabled.
 */
static unsigned long __next_timer_interrupt(struct tvec_base *base)
{
	unsigned long timer_jiffies = base->timer_jiffies;
	unsigned long expires = timer_jiffies + NEXT_TIMER_MAX_DELTA;
	int index, slot, array, found = 0;
	struct timer_list *nte;
	struct tvec *varray[4];

	/* Look for timer events in tv1. */
	index = slot = timer_jiffies & TVR_MASK;
	do {
		list_for_each_entry(nte, base->tv1.vec + slot, entry) {
			if (tbase_get_deferrable(nte->base))
				continue;

			found = 1;
			expires = nte->expires;
			/* Look at the cascade bucket(s)? */
			if (!index || slot < index)
				goto cascade;
			return expires;
		}
		slot = (slot + 1) & TVR_MASK;
	} while (slot != index);

cascade:
	/* Calculate the next cascade event */
	if (index)
		timer_jiffies += TVR_SIZE - index;
	timer_jiffies >>= TVR_BITS;

	/* Check tv2-tv5. */
	varray[0] = &base->tv2;
	varray[1] = &base->tv3;
	varray[2] = &base->tv4;
	varray[3] = &base->tv5;

	for (array = 0; array < 4; array++) {
		struct tvec *varp = varray[array];

		index = slot = timer_jiffies & TVN_MASK;
		do {
			list_for_each_entry(nte, varp->vec + slot, entry) {
				found = 1;
				if (time_before(nte->expires, expires))
					expires = nte->expires;
			}
			/*
			 * Do we still search for the first timer or are
			 * we looking up the cascade buckets ?
			 */
			if (found) {
				/* Look at the cascade bucket(s)? */
				if (!index || slot < index)
					break;
				return expires;
			}
			slot = (slot + 1) & TVN_MASK;
		} while (slot != index);

		if (index)
			timer_jiffies += TVN_SIZE - index;
		timer_jiffies >>= TVN_BITS;
	}
	return expires;
}

/*
 * Check, if the next hrtimer event is before the next timer wheel
 * event:
 */
static unsigned long cmp_next_hrtimer_event(unsigned long now,
					    unsigned long expires)
{
	ktime_t hr_delta = hrtimer_get_next_event();
	struct timespec tsdelta;
	unsigned long delta;

	if (hr_delta.tv64 == KTIME_MAX)
		return expires;

	/*
	 * Expired timer available, let it expire in the next tick:
	 */
	if (hr_delta.tv64 <= 0)
		return now + 1;

	tsdelta = ktime_to_timespec(hr_delta);
	delta = timespec_to_jiffies(&tsdelta);

	/*
	 * Limit the delta to the max value, which is checked in
	 * tick_nohz_stop_sched_tick():
	 */
	if (delta > NEXT_TIMER_MAX_DELTA)
		delta = NEXT_TIMER_MAX_DELTA;

	/*
	 * Take rounding errors of the conversion into account: the
	 * hrtimer expiry may round down to zero jiffies, so schedule
	 * the next event at least one jiffy ahead to avoid expiring
	 * the hrtimer late:
	 */
	if (delta < 1)
		delta = 1;
	now += delta;
	if (time_before(now, expires))
		return now;
	return expires;
}

/**
 * get_next_timer_interrupt - return the jiffy of the next pending timer
 * @now: current time (in jiffies)
 */
unsigned long get_next_timer_interrupt(unsigned long now)
{
	struct tvec_base *base = __get_cpu_var(tvec_bases);
	unsigned long expires;

	spin_lock(&base->lock);
	expires = __next_timer_interrupt(base);
	spin_unlock(&base->lock);

	if (time_before_eq(expires, now))
		return now;

	return cmp_next_hrtimer_event(now, expires);
}
#endif

/*
 * Called from the timer interrupt handler to charge one tick to the current
 * process.  user_tick is 1 if the tick is user time, 0 for system.
 */
void update_process_times(int user_tick)
{
	struct task_struct *p = current;
	int cpu = smp_processor_id();

	/* Note: this timer irq context must be accounted for as well. */
	account_process_tick(p, user_tick);
	run_local_timers();
	if (rcu_pending(cpu))
		rcu_check_callbacks(cpu, user_tick);
	printk_tick();
	scheduler_tick();
	run_posix_cpu_timers(p);
}

/*
 * Nr of active tasks - counted in fixed-point numbers
 */
static unsigned long count_active_tasks(void)
{
	return nr_active() * FIXED_1;
}

/*
 * Hmm.. Changed this, as the GNU make sources (load.c) seems to
 * imply that avenrun[] is the standard name for this kind of thing.
 * Nothing else seems to be standardized: the fractional size etc
 * all seem to differ on different machines.
 *
 * Requires xtime_lock to access.
 */
unsigned long avenrun[3];

EXPORT_SYMBOL(avenrun);

/*
 * calc_load - given tick count, update the avenrun load estimates.
 * This is called while holding a write_lock on xtime_lock.
 */
static inline void calc_load(unsigned long ticks)
{
	unsigned long active_tasks; /* fixed-point */
	static int count = LOAD_FREQ;

	count -= ticks;
	if (unlikely(count < 0)) {
		active_tasks = count_active_tasks();
		do {
			CALC_LOAD(avenrun[0], EXP_1, active_tasks);
			CALC_LOAD(avenrun[1], EXP_5, active_tasks);
			CALC_LOAD(avenrun[2], EXP_15, active_tasks);
			count += LOAD_FREQ;
		} while (count < 0);
	}
}
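
/*
 * The CALC_LOAD() macro (see linux/sched.h) implements the usual
 * exponentially damped moving average in 11-bit fixed point
 * (FIXED_1 = 1 << FSHIFT = 2048):
 *
 *	load = (load * exp + active * (FIXED_1 - exp)) >> FSHIFT
 *
 * Worked example with assumed values: EXP_1 is 1884, so starting from
 * a 1-minute load of 0.50 (1024 in fixed point) with 2 active tasks
 * (active = 2 * 2048 = 4096), one LOAD_FREQ (5 second) update gives
 * (1024 * 1884 + 4096 * 164) >> 11 = 1270, i.e. about 0.62, decaying
 * towards 2.0 as long as both tasks stay runnable.
 */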

/*
 * This function runs timers and the timer-tq in bottom half context.
 */
static void run_timer_softirq(struct softirq_action *h)
{
	struct tvec_base *base = __get_cpu_var(tvec_bases);

	hrtimer_run_pending();

	if (time_after_eq(jiffies, base->timer_jiffies))
		__run_timers(base);
}

/*
 * Called by the local, per-CPU timer interrupt on SMP.
 */
void run_local_timers(void)
{
	hrtimer_run_queues();
	raise_softirq(TIMER_SOFTIRQ);
	softlockup_tick();
}

/*
 * Called by the timer interrupt. xtime_lock must already be taken
 * by the timer IRQ!
 */
static inline void update_times(unsigned long ticks)
{
	update_wall_time();
	calc_load(ticks);
}

/*
 * The 64-bit jiffies value is not atomic - it must not be read without
 * sampling the sequence number in xtime_lock; it is only ever updated
 * here, under that lock.
 */
void do_timer(unsigned long ticks)
{
	jiffies_64 += ticks;
	update_times(ticks);
}

#ifdef __ARCH_WANT_SYS_ALARM

/*
 * For backwards compatibility?  This can be done in libc so Alpha
 * and all newer ports shouldn't need it.
 */
SYSCALL_DEFINE1(alarm, unsigned int, seconds)
{
	return alarm_setitimer(seconds);
}

#endif

#ifndef __alpha__

/*
 * The Alpha uses getxpid, getxuid, and getxgid instead.
 */

/**
 * sys_getpid - return the thread group id of the current process
 *
 * Note, despite the name, this returns the tgid not the pid.  The tgid and
 * the pid are identical unless CLONE_THREAD was specified on clone() in
 * which case the tgid is the same in all threads of the same group.
 *
 * This is SMP safe as current->tgid does not change.
 */
SYSCALL_DEFINE0(getpid)
{
	return task_tgid_vnr(current);
}

/*
 * Accessing ->real_parent is not SMP-safe, it could
 * change from under us. However, we can use a stale
 * value of ->real_parent under rcu_read_lock(), see
 * release_task()->call_rcu(delayed_put_task_struct).
 */
SYSCALL_DEFINE0(getppid)
{
	int pid;

	rcu_read_lock();
	pid = task_tgid_vnr(current->real_parent);
	rcu_read_unlock();

	return pid;
}

SYSCALL_DEFINE0(getuid)
{
	/* Only we change this so SMP safe */
	return current_uid();
}

SYSCALL_DEFINE0(geteuid)
{
	/* Only we change this so SMP safe */
	return current_euid();
}

SYSCALL_DEFINE0(getgid)
{
	/* Only we change this so SMP safe */
	return current_gid();
}

SYSCALL_DEFINE0(getegid)
{
	/* Only we change this so SMP safe */
	return current_egid();
}

#endif

static void process_timeout(unsigned long __data)
{
	wake_up_process((struct task_struct *)__data);
}

/**
 * schedule_timeout - sleep until timeout
 * @timeout: timeout value in jiffies
 *
 * Make the current task sleep until @timeout jiffies have
 * elapsed. The routine will return immediately unless
 * the current task state has been set (see set_current_state()).
 *
 * You can set the task state as follows -
 *
 * %TASK_UNINTERRUPTIBLE - at least @timeout jiffies are guaranteed to
 * pass before the routine returns. The routine will return 0
 *
 * %TASK_INTERRUPTIBLE - the routine may return early if a signal is
 * delivered to the current task. In this case the remaining time
 * in jiffies will be returned, or 0 if the timer expired in time
 *
 * The current task state is guaranteed to be TASK_RUNNING when this
 * routine returns.
 *
 * Specifying a @timeout value of %MAX_SCHEDULE_TIMEOUT will schedule
 * the CPU away without a bound on the timeout. In this case the return
 * value will be %MAX_SCHEDULE_TIMEOUT.
 *
 * In all cases the return value is guaranteed to be non-negative.
 */
signed long __sched schedule_timeout(signed long timeout)
{
	struct timer_list timer;
	unsigned long expire;

	switch (timeout)
	{
	case MAX_SCHEDULE_TIMEOUT:
		/*
		 * These two special cases are useful to be comfortable
		 * in the caller. Nothing more. We could take
		 * MAX_SCHEDULE_TIMEOUT from one of the negative value
		 * but I'd like to return a valid offset (>=0) to allow
		 * the caller to do everything it want with the retval.
		 */
		schedule();
		goto out;
	default:
		/*
		 * Another bit of PARANOID. Note that the retval will be
		 * 0 since no piece of kernel is supposed to do a check
		 * for a negative retval of schedule_timeout() (since it
		 * should never happen anyway). You just have the printk()
		 * that will tell you if something has gone wrong and where.
		 */
		if (timeout < 0) {
			printk(KERN_ERR "schedule_timeout: wrong timeout "
				"value %lx\n", timeout);
			dump_stack();
			current->state = TASK_RUNNING;
			goto out;
		}
	}

	expire = timeout + jiffies;

	setup_timer_on_stack(&timer, process_timeout, (unsigned long)current);
	__mod_timer(&timer, expire);
	schedule();
	del_singleshot_timer_sync(&timer);

	/* Remove the timer from the object tracker */
	destroy_timer_on_stack(&timer);

	timeout = expire - jiffies;

 out:
	return timeout < 0 ? 0 : timeout;
}
EXPORT_SYMBOL(schedule_timeout);
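
/*
 * Example (hypothetical caller, for illustration; "condition" is not
 * defined in this file): waiting up to one second for a condition,
 * losing at most the remaining time on each wakeup. The task state
 * must be set before the condition check so a wakeup between the
 * check and schedule_timeout() is not lost:
 *
 *	signed long t = HZ;
 *
 *	set_current_state(TASK_INTERRUPTIBLE);
 *	while (!condition && t > 0) {
 *		t = schedule_timeout(t);
 *		set_current_state(TASK_INTERRUPTIBLE);
 *	}
 *	__set_current_state(TASK_RUNNING);
 *
 * The waker side must call wake_up_process() (or use a waitqueue);
 * wait_event_timeout() wraps this whole pattern and is usually the
 * better choice.
 */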

/*
 * We can use __set_current_state() here because schedule_timeout() calls
 * schedule() unconditionally.
 */
signed long __sched schedule_timeout_interruptible(signed long timeout)
{
	__set_current_state(TASK_INTERRUPTIBLE);
	return schedule_timeout(timeout);
}
EXPORT_SYMBOL(schedule_timeout_interruptible);

signed long __sched schedule_timeout_killable(signed long timeout)
{
	__set_current_state(TASK_KILLABLE);
	return schedule_timeout(timeout);
}
EXPORT_SYMBOL(schedule_timeout_killable);

signed long __sched schedule_timeout_uninterruptible(signed long timeout)
{
	__set_current_state(TASK_UNINTERRUPTIBLE);
	return schedule_timeout(timeout);
}
EXPORT_SYMBOL(schedule_timeout_uninterruptible);

/* Thread ID - the internal kernel "pid" */
SYSCALL_DEFINE0(gettid)
{
	return task_pid_vnr(current);
}

/**
 * do_sysinfo - fill in sysinfo struct
 * @info: pointer to buffer to fill
 */
int do_sysinfo(struct sysinfo *info)
{
	unsigned long mem_total, sav_total;
	unsigned int mem_unit, bitcount;
	unsigned long seq;

	memset(info, 0, sizeof(struct sysinfo));

	do {
		struct timespec tp;
		seq = read_seqbegin(&xtime_lock);

		/*
		 * Uptime is the bootbased monotonic time: wall time plus
		 * the wall_to_monotonic offset, plus time spent in
		 * suspend. Partial seconds are rounded up, so an uptime
		 * of e.g. 4.5s reports as 5.
		 */
		getnstimeofday(&tp);
		tp.tv_sec += wall_to_monotonic.tv_sec;
		tp.tv_nsec += wall_to_monotonic.tv_nsec;
		monotonic_to_bootbased(&tp);
		if (tp.tv_nsec - NSEC_PER_SEC >= 0) {
			tp.tv_nsec = tp.tv_nsec - NSEC_PER_SEC;
			tp.tv_sec++;
		}
		info->uptime = tp.tv_sec + (tp.tv_nsec ? 1 : 0);

		info->loads[0] = avenrun[0] << (SI_LOAD_SHIFT - FSHIFT);
		info->loads[1] = avenrun[1] << (SI_LOAD_SHIFT - FSHIFT);
		info->loads[2] = avenrun[2] << (SI_LOAD_SHIFT - FSHIFT);

		info->procs = nr_threads;
	} while (read_seqretry(&xtime_lock, seq));

	si_meminfo(info);
	si_swapinfo(info);

	/*
	 * If the sum of all the available memory (i.e. ram + swap)
	 * is less than can be stored in a 32 bit unsigned long then
	 * we can be binary compatible with 2.2.x kernels.  If not,
	 * well, in that case 2.2.x was broken anyways...
	 *
	 *  -Erik Andersen <andersee@debian.org>
	 */
	mem_total = info->totalram + info->totalswap;
	if (mem_total < info->totalram || mem_total < info->totalswap)
		goto out;
	bitcount = 0;
	mem_unit = info->mem_unit;
	while (mem_unit > 1) {
		bitcount++;
		mem_unit >>= 1;
		sav_total = mem_total;
		mem_total <<= 1;
		if (mem_total < sav_total)
			goto out;
	}

	/*
	 * If mem_total did not overflow, multiply all memory values by
	 * info->mem_unit and set it to 1.  This leaves things compatible
	 * with 2.2.x, and also retains compatibility with earlier 2.4.x
	 * kernels...
	 */
	info->mem_unit = 1;
	info->totalram <<= bitcount;
	info->freeram <<= bitcount;
	info->sharedram <<= bitcount;
	info->bufferram <<= bitcount;
	info->totalswap <<= bitcount;
	info->freeswap <<= bitcount;
	info->totalhigh <<= bitcount;
	info->freehigh <<= bitcount;

out:
	return 0;
}

SYSCALL_DEFINE1(sysinfo, struct sysinfo __user *, info)
{
	struct sysinfo val;

	do_sysinfo(&val);

	if (copy_to_user(info, &val, sizeof(struct sysinfo)))
		return -EFAULT;

	return 0;
}
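
/*
 * The userspace view of the syscall above, for illustration only
 * (glibc wrapper, not part of this file):
 *
 *	#include <sys/sysinfo.h>
 *
 *	struct sysinfo si;
 *	if (sysinfo(&si) == 0)
 *		printf("up %lds, load %.2f\n", si.uptime,
 *		       si.loads[0] / 65536.0);
 *
 * si.loads[] is fixed point scaled by 1 << SI_LOAD_SHIFT (65536),
 * which is why do_sysinfo() shifts avenrun[] left by
 * SI_LOAD_SHIFT - FSHIFT.
 */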

static int __cpuinit init_timers_cpu(int cpu)
{
	int j;
	struct tvec_base *base;
	static char __cpuinitdata tvec_base_done[NR_CPUS];

	if (!tvec_base_done[cpu]) {
		static char boot_done;

		if (boot_done) {
			/*
			 * The APs use this path later in boot
			 */
			base = kmalloc_node(sizeof(*base),
						GFP_KERNEL | __GFP_ZERO,
						cpu_to_node(cpu));
			if (!base)
				return -ENOMEM;

			/* Make sure that tvec_base is 2 byte aligned */
			if (tbase_get_deferrable(base)) {
				WARN_ON(1);
				kfree(base);
				return -ENOMEM;
			}
			per_cpu(tvec_bases, cpu) = base;
		} else {
			/*
			 * This is for the boot CPU - we use compile-time
			 * static initialisation because per-cpu memory isn't
			 * ready yet and because the memory allocators are not
			 * initialised either.
			 */
			boot_done = 1;
			base = &boot_tvec_bases;
		}
		tvec_base_done[cpu] = 1;
	} else {
		base = per_cpu(tvec_bases, cpu);
	}

	spin_lock_init(&base->lock);

	for (j = 0; j < TVN_SIZE; j++) {
		INIT_LIST_HEAD(base->tv5.vec + j);
		INIT_LIST_HEAD(base->tv4.vec + j);
		INIT_LIST_HEAD(base->tv3.vec + j);
		INIT_LIST_HEAD(base->tv2.vec + j);
	}
	for (j = 0; j < TVR_SIZE; j++)
		INIT_LIST_HEAD(base->tv1.vec + j);

	base->timer_jiffies = jiffies;
	return 0;
}

#ifdef CONFIG_HOTPLUG_CPU
static void migrate_timer_list(struct tvec_base *new_base, struct list_head *head)
{
	struct timer_list *timer;

	while (!list_empty(head)) {
		timer = list_first_entry(head, struct timer_list, entry);
		detach_timer(timer, 0);
		timer_set_base(timer, new_base);
		internal_add_timer(new_base, timer);
	}
}

static void __cpuinit migrate_timers(int cpu)
{
	struct tvec_base *old_base;
	struct tvec_base *new_base;
	int i;

	BUG_ON(cpu_online(cpu));
	old_base = per_cpu(tvec_bases, cpu);
	new_base = get_cpu_var(tvec_bases);

	/*
	 * The caller is globally serialized and nobody else
	 * takes two locks at once, deadlock is not possible.
	 */
	spin_lock_irq(&new_base->lock);
	spin_lock_nested(&old_base->lock, SINGLE_DEPTH_NESTING);

	BUG_ON(old_base->running_timer);

	for (i = 0; i < TVR_SIZE; i++)
		migrate_timer_list(new_base, old_base->tv1.vec + i);
	for (i = 0; i < TVN_SIZE; i++) {
		migrate_timer_list(new_base, old_base->tv2.vec + i);
		migrate_timer_list(new_base, old_base->tv3.vec + i);
		migrate_timer_list(new_base, old_base->tv4.vec + i);
		migrate_timer_list(new_base, old_base->tv5.vec + i);
	}

	spin_unlock(&old_base->lock);
	spin_unlock_irq(&new_base->lock);
	put_cpu_var(tvec_bases);
}
#endif /* CONFIG_HOTPLUG_CPU */

static int __cpuinit timer_cpu_notify(struct notifier_block *self,
				unsigned long action, void *hcpu)
{
	long cpu = (long)hcpu;
	switch(action) {
	case CPU_UP_PREPARE:
	case CPU_UP_PREPARE_FROZEN:
		if (init_timers_cpu(cpu) < 0)
			return NOTIFY_BAD;
		break;
#ifdef CONFIG_HOTPLUG_CPU
	case CPU_DEAD:
	case CPU_DEAD_FROZEN:
		migrate_timers(cpu);
		break;
#endif
	default:
		break;
	}
	return NOTIFY_OK;
}

static struct notifier_block __cpuinitdata timers_nb = {
	.notifier_call	= timer_cpu_notify,
};


void __init init_timers(void)
{
	int err = timer_cpu_notify(&timers_nb, (unsigned long)CPU_UP_PREPARE,
				(void *)(long)smp_processor_id());

	init_timer_stats();

	BUG_ON(err == NOTIFY_BAD);
	register_cpu_notifier(&timers_nb);
	open_softirq(TIMER_SOFTIRQ, run_timer_softirq);
}

/**
 * msleep - sleep safely even with waitqueue interruptions
 * @msecs: Time in milliseconds to sleep for
 */
void msleep(unsigned int msecs)
{
	unsigned long timeout = msecs_to_jiffies(msecs) + 1;

	while (timeout)
		timeout = schedule_timeout_uninterruptible(timeout);
}

EXPORT_SYMBOL(msleep);
#endif /* !DDE_LINUX */
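
/*
 * Usage note (general kernel guidance, not specific to this file):
 * msleep() always sleeps at least one extra jiffy (note the "+ 1"
 * above), so for example msleep(1) with HZ=100 can sleep for up to
 * 20ms. msleep() schedules and therefore may only be called from
 * process context; for short waits in atomic context use the
 * mdelay()/udelay() busy loops from <linux/delay.h> instead.
 */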

/**
 * msleep_interruptible - sleep waiting for signals
 * @msecs: Time in milliseconds to sleep for
 *
 * Returns the number of milliseconds left to sleep, or 0 if the full
 * time elapsed.
 */
unsigned long msleep_interruptible(unsigned int msecs)
{
	unsigned long timeout = msecs_to_jiffies(msecs) + 1;

	while (timeout && !signal_pending(current))
		timeout = schedule_timeout_interruptible(timeout);
	return jiffies_to_msecs(timeout);
}

EXPORT_SYMBOL(msleep_interruptible);