1 /*
  2  *  kernel/sched.c
  3  *
  4  *  Kernel scheduler and related syscalls
  5  *
  6  *  Copyright (C) 1991-2002  Linus Torvalds
  7  *
  8  *  1996-12-23  Modified by Dave Grothe to fix bugs in semaphores and
  9  *              make semaphores SMP safe
 10  *  1998-11-19  Implemented schedule_timeout() and related stuff
 11  *              by Andrea Arcangeli
 12  *  2002-01-04  New ultra-scalable O(1) scheduler by Ingo Molnar:
 13  *              hybrid priority-list and round-robin design with
 14  *              an array-switch method of distributing timeslices
 15  *              and per-CPU runqueues.  Cleanups and useful suggestions
 16  *              by Davide Libenzi, preemptible kernel bits by Robert Love.
 17  *  2003-09-03  Interactivity tuning by Con Kolivas.
 18  */
 19 
 20 #include <linux/mm.h>
 21 #include <linux/module.h>
 22 #include <linux/nmi.h>
 23 #include <linux/init.h>
 24 #include <asm/uaccess.h>
 25 #include <linux/highmem.h>
 26 #include <linux/smp_lock.h>
 27 #include <asm/mmu_context.h>
 28 #include <linux/interrupt.h>
 29 #include <linux/completion.h>
 30 #include <linux/kernel_stat.h>
 31 #include <linux/security.h>
 32 #include <linux/notifier.h>
 33 #include <linux/suspend.h>
 34 #include <linux/blkdev.h>
 35 #include <linux/delay.h>
 36 #include <linux/timer.h>
 37 #include <linux/rcupdate.h>
 38 #include <linux/cpu.h>
 39 #include <linux/percpu.h>
 40 
 41 #ifdef CONFIG_NUMA
 42 #define cpu_to_node_mask(cpu) node_to_cpumask(cpu_to_node(cpu))
 43 #else
 44 #define cpu_to_node_mask(cpu) (cpu_online_map)
 45 #endif
 46 
 47 /*
 48  * Convert user-nice values [ -20 ... 0 ... 19 ]
 49  * to static priority [ MAX_RT_PRIO..MAX_PRIO-1 ],
 50  * and back.
 51  */
 52 #define NICE_TO_PRIO(nice)      (MAX_RT_PRIO + (nice) + 20)
 53 #define PRIO_TO_NICE(prio)      ((prio) - MAX_RT_PRIO - 20)
 54 #define TASK_NICE(p)            PRIO_TO_NICE((p)->static_prio)
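/*
 * Worked example, added for illustration (assumes the usual values
 * MAX_RT_PRIO == 100 and MAX_PRIO == 140 from <linux/sched.h>):
 *
 *   NICE_TO_PRIO(-20) == 100     PRIO_TO_NICE(100) == -20
 *   NICE_TO_PRIO(  0) == 120     PRIO_TO_NICE(120) ==   0
 *   NICE_TO_PRIO(+19) == 139     PRIO_TO_NICE(139) == +19
 *
 * i.e. the whole nice range maps onto the non-RT priority slots
 * [MAX_RT_PRIO, MAX_PRIO-1], with nice 0 landing on priority 120.
 */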
 55 
 56 /*
 57  * 'User priority' is the nice value converted to something we
 58  * can work with better when scaling various scheduler parameters,
 59  * it's a [ 0 ... 39 ] range.
 60  */
 61 #define USER_PRIO(p)            ((p)-MAX_RT_PRIO)
 62 #define TASK_USER_PRIO(p)       USER_PRIO((p)->static_prio)
 63 #define MAX_USER_PRIO           (USER_PRIO(MAX_PRIO))
 64 #define AVG_TIMESLICE   (MIN_TIMESLICE + ((MAX_TIMESLICE - MIN_TIMESLICE) *\
 65                         (MAX_PRIO-1-NICE_TO_PRIO(0))/(MAX_USER_PRIO - 1)))
 66 
 67 /*
 68  * Some helpers for converting nanosecond timing to jiffy resolution
 69  */
 70 #define NS_TO_JIFFIES(TIME)     ((TIME) / (1000000000 / HZ))
 71 #define JIFFIES_TO_NS(TIME)     ((TIME) * (1000000000 / HZ))
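/*
 * Illustration (assumes HZ == 1000, i.e. one jiffy == 1 ms == 10^6 ns):
 *
 *   NS_TO_JIFFIES(5000000) == 5000000 / (1000000000 / 1000) == 5 jiffies
 *   JIFFIES_TO_NS(5)       == 5 * 1000000 ns                == 5 ms
 *
 * With HZ == 100 the conversion factor becomes 10^7 ns per jiffy.
 */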
 72 
 73 /*
 74  * These are the 'tuning knobs' of the scheduler:
 75  *
 76  * Minimum timeslice is 10 msecs, default timeslice is 100 msecs,
 77  * maximum timeslice is 200 msecs. Timeslices get refilled after
 78  * they expire.
 79  */
 80 #define MIN_TIMESLICE           ( 10 * HZ / 1000)
 81 #define MAX_TIMESLICE           (200 * HZ / 1000)
 82 #define ON_RUNQUEUE_WEIGHT      30
 83 #define CHILD_PENALTY           95
 84 #define PARENT_PENALTY          100
 85 #define EXIT_WEIGHT             3
 86 #define PRIO_BONUS_RATIO        25
 87 #define MAX_BONUS               (MAX_USER_PRIO * PRIO_BONUS_RATIO / 100)
 88 #define INTERACTIVE_DELTA       2
 89 #define MAX_SLEEP_AVG           (AVG_TIMESLICE * MAX_BONUS)
 90 #define STARVATION_LIMIT        (MAX_SLEEP_AVG)
 91 #define NS_MAX_SLEEP_AVG        (JIFFIES_TO_NS(MAX_SLEEP_AVG))
 92 #define NODE_THRESHOLD          125
 93 #define CREDIT_LIMIT            100
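/*
 * Concrete values of the knobs above, purely for illustration, assuming
 * HZ == 1000, MAX_RT_PRIO == 100 and MAX_PRIO == 140 (so MAX_USER_PRIO
 * == 40):
 *
 *   MIN_TIMESLICE    =  10 jiffies (10 ms)
 *   MAX_TIMESLICE    = 200 jiffies (200 ms)
 *   MAX_BONUS        = 40 * 25 / 100 = 10
 *   AVG_TIMESLICE    = 10 + 190 * (139 - 120) / 39 = 102 jiffies (~100 ms)
 *   MAX_SLEEP_AVG    = 102 * 10 = 1020 jiffies (~1 second)
 *   NS_MAX_SLEEP_AVG = 1020 * 10^6 ns
 *   STARVATION_LIMIT = 1020 jiffies
 */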
 94 
 95 /*
 96  * If a task is 'interactive' then we reinsert it in the active
 97  * array after it has expired its current timeslice. (It will not
 98  * continue to run immediately; it will still round-robin with
 99  * other interactive tasks.)
100  *
101  * This part scales the interactivity limit depending on niceness.
102  *
103  * We scale it linearly, offset by the INTERACTIVE_DELTA delta.
104  * Here are a few examples of different nice levels:
105  *
106  *  TASK_INTERACTIVE(-20): [1,1,1,1,1,1,1,1,1,0,0]
107  *  TASK_INTERACTIVE(-10): [1,1,1,1,1,1,1,0,0,0,0]
108  *  TASK_INTERACTIVE(  0): [1,1,1,1,0,0,0,0,0,0,0]
109  *  TASK_INTERACTIVE( 10): [1,1,0,0,0,0,0,0,0,0,0]
110  *  TASK_INTERACTIVE( 19): [0,0,0,0,0,0,0,0,0,0,0]
111  *
112  * (the X axis represents the possible -5 ... 0 ... +5 dynamic
113  *  priority range a task can explore, a value of '1' means the
114  *  task is rated interactive.)
115  *
116  * I.e. nice +19 tasks can never get 'interactive' enough to be
117  * reinserted into the active array, and only heavy CPU-hog nice -20
118  * tasks will be expired. Default nice 0 tasks are somewhere in
119  * between: it takes some effort for them to get interactive, but
120  * it's not too hard.
121  */
122 
123 #define CURRENT_BONUS(p) \
124         (NS_TO_JIFFIES((p)->sleep_avg) * MAX_BONUS / \
125                 MAX_SLEEP_AVG)
126 
127 #ifdef CONFIG_SMP
128 #define TIMESLICE_GRANULARITY(p)        (MIN_TIMESLICE * \
129                 (1 << (((MAX_BONUS - CURRENT_BONUS(p)) ? : 1) - 1)) * \
130                         num_online_cpus())
131 #else
132 #define TIMESLICE_GRANULARITY(p)        (MIN_TIMESLICE * \
133                 (1 << (((MAX_BONUS - CURRENT_BONUS(p)) ? : 1) - 1)))
134 #endif
135 
136 #define SCALE(v1,v1_max,v2_max) \
137         (v1) * (v2_max) / (v1_max)
138 
139 #define DELTA(p) \
140         (SCALE(TASK_NICE(p), 40, MAX_USER_PRIO*PRIO_BONUS_RATIO/100) + \
141                 INTERACTIVE_DELTA)
142 
143 #define TASK_INTERACTIVE(p) \
144         ((p)->prio <= (p)->static_prio - DELTA(p))
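/*
 * Example of how DELTA() and TASK_INTERACTIVE() interact (illustrative,
 * using the MAX_PRIO == 140 / MAX_BONUS == 10 values assumed above):
 *
 *   nice   0: DELTA == SCALE(0, 40, 10)  + 2 == 2, so the task is
 *             interactive iff prio <= 120 - 2 == 118, i.e. it needs a
 *             dynamic bonus of at least +2.
 *   nice +19: DELTA == SCALE(19, 40, 10) + 2 == 6, so it would need
 *             prio <= 139 - 6 == 133, below the best dynamic priority a
 *             nice +19 task can ever reach (134), hence never interactive.
 *
 * This matches the TASK_INTERACTIVE table in the comment further up.
 */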
145 
146 #define JUST_INTERACTIVE_SLEEP(p) \
147         (JIFFIES_TO_NS(MAX_SLEEP_AVG * \
148                 (MAX_BONUS / 2 + DELTA((p)) + 1) / MAX_BONUS - 1))
149 
150 #define HIGH_CREDIT(p) \
151         ((p)->interactive_credit > CREDIT_LIMIT)
152 
153 #define LOW_CREDIT(p) \
154         ((p)->interactive_credit < -CREDIT_LIMIT)
155 
156 #define TASK_PREEMPTS_CURR(p, rq) \
157         ((p)->prio < (rq)->curr->prio)
158 
159 /*
160  * BASE_TIMESLICE scales user-nice values [ -20 ... 19 ]
161  * to time slice values.
162  *
163  * The higher a thread's priority, the bigger timeslices
164  * it gets during one round of execution. But even the lowest
165  * priority thread gets MIN_TIMESLICE worth of execution time.
166  *
167  * task_timeslice() is the interface that is used by the scheduler.
168  */
169 
170 #define BASE_TIMESLICE(p) (MIN_TIMESLICE + \
171         ((MAX_TIMESLICE - MIN_TIMESLICE) * (MAX_PRIO-1-(p)->static_prio)/(MAX_USER_PRIO - 1)))
172 
173 static inline unsigned int task_timeslice(task_t *p)
174 {
175         return BASE_TIMESLICE(p);
176 }
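/*
 * Resulting timeslices, for illustration (assuming HZ == 1000 and the
 * usual static priority range 100..139):
 *
 *   nice -20 (static_prio 100): 10 + 190 * 39/39 == 200 ms
 *   nice   0 (static_prio 120): 10 + 190 * 19/39 ~= 102 ms
 *   nice +19 (static_prio 139): 10 + 190 *  0/39 ==  10 ms
 */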
177 
178 /*
179  * These are the runqueue data structures:
180  */
181 
182 #define BITMAP_SIZE ((((MAX_PRIO+1+7)/8)+sizeof(long)-1)/sizeof(long))
183 
184 typedef struct runqueue runqueue_t;
185 
186 struct prio_array {
187         int nr_active;
188         unsigned long bitmap[BITMAP_SIZE];
189         struct list_head queue[MAX_PRIO];
190 };
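/*
 * Sketch of how a prio_array is consumed (see schedule() further down):
 * picking the next task is O(1) because the highest-priority non-empty
 * list is found with a single bitmap search:
 *
 *   idx  = sched_find_first_bit(array->bitmap);
 *   next = list_entry(array->queue[idx].next, task_t, run_list);
 *
 * enqueue_task()/dequeue_task() below keep bitmap, queue[] and
 * nr_active consistent with each other.
 */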
191 
192 /*
193  * This is the main, per-CPU runqueue data structure.
194  *
195  * Locking rule: places that lock multiple runqueues (such as the
196  * load-balancing or thread-migration code) must acquire the locks
197  * in ascending &runqueue order.
198  */
199 struct runqueue {
200         spinlock_t lock;
201         unsigned long nr_running, nr_switches, expired_timestamp,
202                         nr_uninterruptible;
203         task_t *curr, *idle;
204         struct mm_struct *prev_mm;
205         prio_array_t *active, *expired, arrays[2];
206         int prev_cpu_load[NR_CPUS];
207 #ifdef CONFIG_NUMA
208         atomic_t *node_nr_running;
209         int prev_node_load[MAX_NUMNODES];
210 #endif
211         task_t *migration_thread;
212         struct list_head migration_queue;
213 
214         atomic_t nr_iowait;
215 };
216 
217 static DEFINE_PER_CPU(struct runqueue, runqueues);
218 
219 #define cpu_rq(cpu)             (&per_cpu(runqueues, (cpu)))
220 #define this_rq()               (&__get_cpu_var(runqueues))
221 #define task_rq(p)              cpu_rq(task_cpu(p))
222 #define cpu_curr(cpu)           (cpu_rq(cpu)->curr)
223 
224 /*
225  * Default context-switch locking:
226  */
227 #ifndef prepare_arch_switch
228 # define prepare_arch_switch(rq, next)  do { } while(0)
229 # define finish_arch_switch(rq, next)   spin_unlock_irq(&(rq)->lock)
230 # define task_running(rq, p)            ((rq)->curr == (p))
231 #endif
232 
233 #ifdef CONFIG_NUMA
234 
235 /*
236  * Keep track of running tasks.
237  */
238 
239 static atomic_t node_nr_running[MAX_NUMNODES] ____cacheline_maxaligned_in_smp =
240         {[0 ...MAX_NUMNODES-1] = ATOMIC_INIT(0)};
241 
242 static inline void nr_running_init(struct runqueue *rq)
243 {
244         rq->node_nr_running = &node_nr_running[0];
245 }
246 
247 static inline void nr_running_inc(runqueue_t *rq)
248 {
249         atomic_inc(rq->node_nr_running);
250         rq->nr_running++;
251 }
252 
253 static inline void nr_running_dec(runqueue_t *rq)
254 {
255         atomic_dec(rq->node_nr_running);
256         rq->nr_running--;
257 }
258 
259 __init void node_nr_running_init(void)
260 {
261         int i;
262 
263         for (i = 0; i < NR_CPUS; i++) {
264                 if (cpu_possible(i))
265                         cpu_rq(i)->node_nr_running =
266                                 &node_nr_running[cpu_to_node(i)];
267         }
268 }
269 
270 #else /* !CONFIG_NUMA */
271 
272 # define nr_running_init(rq)   do { } while (0)
273 # define nr_running_inc(rq)    do { (rq)->nr_running++; } while (0)
274 # define nr_running_dec(rq)    do { (rq)->nr_running--; } while (0)
275 
276 #endif /* CONFIG_NUMA */
277 
278 /*
279  * task_rq_lock - lock the runqueue a given task resides on and disable
280  * interrupts.  Note the ordering: we can safely lookup the task_rq without
281  * explicitly disabling preemption.
282  */
283 static inline runqueue_t *task_rq_lock(task_t *p, unsigned long *flags)
284 {
285         struct runqueue *rq;
286 
287 repeat_lock_task:
288         local_irq_save(*flags);
289         rq = task_rq(p);
290         spin_lock(&rq->lock);
291         if (unlikely(rq != task_rq(p))) {
292                 spin_unlock_irqrestore(&rq->lock, *flags);
293                 goto repeat_lock_task;
294         }
295         return rq;
296 }
297 
298 static inline void task_rq_unlock(runqueue_t *rq, unsigned long *flags)
299 {
300         spin_unlock_irqrestore(&rq->lock, *flags);
301 }
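/*
 * Typical calling pattern (as used by try_to_wake_up() below):
 *
 *   unsigned long flags;
 *   runqueue_t *rq = task_rq_lock(p, &flags);
 *   ...   p cannot be moved to another runqueue while rq->lock is held
 *   task_rq_unlock(rq, &flags);
 */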
302 
303 /*
304  * this_rq_lock - lock this CPU's runqueue and disable interrupts.
305  */
306 static inline runqueue_t *this_rq_lock(void)
307 {
308         runqueue_t *rq;
309 
310         local_irq_disable();
311         rq = this_rq();
312         spin_lock(&rq->lock);
313 
314         return rq;
315 }
316 
317 static inline void rq_unlock(runqueue_t *rq)
318 {
319         spin_unlock_irq(&rq->lock);
320 }
321 
322 /*
323  * Adding/removing a task to/from a priority array:
324  */
325 static inline void dequeue_task(struct task_struct *p, prio_array_t *array)
326 {
327         array->nr_active--;
328         list_del(&p->run_list);
329         if (list_empty(array->queue + p->prio))
330                 __clear_bit(p->prio, array->bitmap);
331 }
332 
333 static inline void enqueue_task(struct task_struct *p, prio_array_t *array)
334 {
335         list_add_tail(&p->run_list, array->queue + p->prio);
336         __set_bit(p->prio, array->bitmap);
337         array->nr_active++;
338         p->array = array;
339 }
340 
341 /*
342  * effective_prio - return the priority that is based on the static
343  * priority but is modified by bonuses/penalties.
344  *
345  * We scale the actual sleep average [0 .... MAX_SLEEP_AVG]
346  * into the -5 ... 0 ... +5 bonus/penalty range.
347  *
348  * We use 25% of the full 0...39 priority range so that:
349  *
350  * 1) nice +19 interactive tasks do not preempt nice 0 CPU hogs.
351  * 2) nice -20 CPU hogs do not get preempted by nice 0 tasks.
352  *
353  * Both properties are important to certain workloads.
354  */
355 static int effective_prio(task_t *p)
356 {
357         int bonus, prio;
358 
359         if (rt_task(p))
360                 return p->prio;
361 
362         bonus = CURRENT_BONUS(p) - MAX_BONUS / 2;
363 
364         prio = p->static_prio - bonus;
365         if (prio < MAX_RT_PRIO)
366                 prio = MAX_RT_PRIO;
367         if (prio > MAX_PRIO-1)
368                 prio = MAX_PRIO-1;
369         return prio;
370 }
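/*
 * Example (illustrative, nice 0 task with static_prio 120 and the
 * MAX_BONUS == 10 values assumed above):
 *
 *   sleep_avg == NS_MAX_SLEEP_AVG: CURRENT_BONUS == 10, bonus == +5,
 *                                  prio == 120 - 5 == 115 (interactive)
 *   sleep_avg == 0:                CURRENT_BONUS ==  0, bonus == -5,
 *                                  prio == 120 + 5 == 125 (CPU hog)
 *
 * The result is always clamped to [MAX_RT_PRIO, MAX_PRIO-1].
 */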
371 
372 /*
373  * __activate_task - move a task to the runqueue.
374  */
375 static inline void __activate_task(task_t *p, runqueue_t *rq)
376 {
377         enqueue_task(p, rq->active);
378         nr_running_inc(rq);
379 }
380 
381 static void recalc_task_prio(task_t *p, unsigned long long now)
382 {
383         unsigned long long __sleep_time = now - p->timestamp;
384         unsigned long sleep_time;
385 
386         if (__sleep_time > NS_MAX_SLEEP_AVG)
387                 sleep_time = NS_MAX_SLEEP_AVG;
388         else
389                 sleep_time = (unsigned long)__sleep_time;
390 
391         if (likely(sleep_time > 0)) {
392                 /*
393                  * User tasks that sleep a long time are categorised as
394                  * idle and are given just-interactive status to stay
395                  * active, preventing them from suddenly becoming cpu hogs
396                  * and starving other processes.
397                  */
398                 if (p->mm && p->activated != -1 &&
399                         sleep_time > JUST_INTERACTIVE_SLEEP(p)){
400                                 p->sleep_avg = JIFFIES_TO_NS(MAX_SLEEP_AVG -
401                                                 AVG_TIMESLICE);
402                                 if (!HIGH_CREDIT(p))
403                                         p->interactive_credit++;
404                 } else {
405                         /*
406                          * The lower the sleep avg a task has the more
407                          * rapidly it will rise with sleep time.
408                          */
409                         sleep_time *= (MAX_BONUS - CURRENT_BONUS(p)) ? : 1;
410 
411                         /*
412                          * Tasks with low interactive_credit are limited to
413                          * one timeslice worth of sleep avg bonus.
414                          */
415                         if (LOW_CREDIT(p) &&
416                                 sleep_time > JIFFIES_TO_NS(task_timeslice(p)))
417                                         sleep_time =
418                                                 JIFFIES_TO_NS(task_timeslice(p));
419 
420                         /*
421                          * Tasks without high interactive_credit waking from
422                          * uninterruptible sleep are limited in their sleep_avg
423                          * rise, as they are likely to be cpu hogs waiting on I/O.
424                          */
425                         if (p->activated == -1 && !HIGH_CREDIT(p) && p->mm){
426                                 if (p->sleep_avg >= JUST_INTERACTIVE_SLEEP(p))
427                                         sleep_time = 0;
428                                 else if (p->sleep_avg + sleep_time >=
429                                         JUST_INTERACTIVE_SLEEP(p)){
430                                                 p->sleep_avg =
431                                                         JUST_INTERACTIVE_SLEEP(p);
432                                                 sleep_time = 0;
433                                         }
434                         }
435 
436                         /*
437                          * This code gives a bonus to interactive tasks.
438                          *
439                          * The boost works by updating the 'average sleep time'
440                          * value here, based on ->timestamp. The more time a task
441                          * spends sleeping, the higher the average gets - and the
442                          * higher the priority boost gets as well.
443                          */
444                         p->sleep_avg += sleep_time;
445 
446                         if (p->sleep_avg > NS_MAX_SLEEP_AVG){
447                                 p->sleep_avg = NS_MAX_SLEEP_AVG;
448                                 if (!HIGH_CREDIT(p))
449                                         p->interactive_credit++;
450                         }
451                 }
452         }
453 
454         p->prio = effective_prio(p);
455 }
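/*
 * Illustration of the sleep_avg boost above: a task whose sleep_avg is
 * so low that CURRENT_BONUS(p) == 0 has its sleep_time multiplied by
 * MAX_BONUS (10), so short sleeps pull it out of CPU-hog territory
 * quickly, while a task already at maximum bonus only gets sleep_time
 * credited 1:1.
 */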
456 
457 /*
458  * activate_task - move a task to the runqueue and do priority recalculation
459  *
460  * Update all the scheduling statistics stuff. (sleep average
461  * calculation, priority modifiers, etc.)
462  */
463 static inline void activate_task(task_t *p, runqueue_t *rq)
464 {
465         unsigned long long now = sched_clock();
466 
467         recalc_task_prio(p, now);
468 
469         /*
470          * This checks to make sure it's not an uninterruptible task
471          * that is now waking up.
472          */
473         if (!p->activated){
474                 /*
475                  * Tasks which were woken up by interrupts (ie. hw events)
476                  * are most likely of interactive nature. So we give them
477                  * the credit of extending their sleep time to the period
478                  * of time they spend on the runqueue, waiting for execution
479                  * on a CPU, first time around:
480                  */
481                 if (in_interrupt())
482                         p->activated = 2;
483                 else
484                 /*
485                  * Normal first-time wakeups get a credit too for on-runqueue
486                  * time, but it will be weighted down:
487                  */
488                         p->activated = 1;
489         }
490         p->timestamp = now;
491 
492         __activate_task(p, rq);
493 }
494 
495 /*
496  * deactivate_task - remove a task from the runqueue.
497  */
498 static inline void deactivate_task(struct task_struct *p, runqueue_t *rq)
499 {
500         nr_running_dec(rq);
501         if (p->state == TASK_UNINTERRUPTIBLE)
502                 rq->nr_uninterruptible++;
503         dequeue_task(p, p->array);
504         p->array = NULL;
505 }
506 
507 /*
508  * resched_task - mark a task 'to be rescheduled now'.
509  *
510  * On UP this means the setting of the need_resched flag, on SMP it
511  * might also involve a cross-CPU call to trigger the scheduler on
512  * the target CPU.
513  */
514 static inline void resched_task(task_t *p)
515 {
516 #ifdef CONFIG_SMP
517         int need_resched, nrpolling;
518 
519         preempt_disable();
520         /* minimise the chance of sending an interrupt to poll_idle() */
521         nrpolling = test_tsk_thread_flag(p,TIF_POLLING_NRFLAG);
522         need_resched = test_and_set_tsk_thread_flag(p,TIF_NEED_RESCHED);
523         nrpolling |= test_tsk_thread_flag(p,TIF_POLLING_NRFLAG);
524 
525         if (!need_resched && !nrpolling && (task_cpu(p) != smp_processor_id()))
526                 smp_send_reschedule(task_cpu(p));
527         preempt_enable();
528 #else
529         set_tsk_need_resched(p);
530 #endif
531 }
532 
533 /**
534  * task_curr - is this task currently executing on a CPU?
535  * @p: the task in question.
536  */
537 inline int task_curr(task_t *p)
538 {
539         return cpu_curr(task_cpu(p)) == p;
540 }
541 
542 #ifdef CONFIG_SMP
543 
544 /*
545  * wait_task_inactive - wait for a thread to unschedule.
546  *
547  * The caller must ensure that the task *will* unschedule sometime soon,
548  * else this function might spin for a *long* time. This function can't
549  * be called with interrupts off, or it may introduce deadlock with
550  * smp_call_function() if an IPI is sent by the same process we are
551  * waiting to become inactive.
552  */
553 void wait_task_inactive(task_t * p)
554 {
555         unsigned long flags;
556         runqueue_t *rq;
557 
558 repeat:
559         preempt_disable();
560         rq = task_rq(p);
561         if (unlikely(task_running(rq, p))) {
562                 cpu_relax();
563                 /*
564                  * enable/disable preemption just to make this
565                  * a preemption point - we are busy-waiting
566                  * anyway.
567                  */
568                 preempt_enable();
569                 goto repeat;
570         }
571         rq = task_rq_lock(p, &flags);
572         if (unlikely(task_running(rq, p))) {
573                 task_rq_unlock(rq, &flags);
574                 preempt_enable();
575                 goto repeat;
576         }
577         task_rq_unlock(rq, &flags);
578         preempt_enable();
579 }
580 
581 /***
582  * kick_process - kick a running thread to enter/exit the kernel
583  * @p: the to-be-kicked thread
584  *
585  * Cause a process which is running on another CPU to enter
586  * kernel-mode, without any delay. (to get signals handled.)
587  */
588 void kick_process(task_t *p)
589 {
590         int cpu;
591 
592         preempt_disable();
593         cpu = task_cpu(p);
594         if ((cpu != smp_processor_id()) && task_curr(p))
595                 smp_send_reschedule(cpu);
596         preempt_enable();
597 }
598 
599 EXPORT_SYMBOL_GPL(kick_process);
600 
601 #endif
602 
603 /***
604  * try_to_wake_up - wake up a thread
605  * @p: the to-be-woken-up thread
606  * @state: the mask of task states that can be woken
607  * @sync: do a synchronous wakeup?
608  *
609  * Put it on the run-queue if it's not already there. The "current"
610  * thread is always on the run-queue (except when the actual
611  * re-schedule is in progress), and as such you're allowed to do
612  * the simpler "current->state = TASK_RUNNING" to mark yourself
613  * runnable without the overhead of this.
614  *
615  * returns failure only if the task is already active.
616  */
617 static int try_to_wake_up(task_t * p, unsigned int state, int sync)
618 {
619         unsigned long flags;
620         int success = 0;
621         long old_state;
622         runqueue_t *rq;
623 
624 repeat_lock_task:
625         rq = task_rq_lock(p, &flags);
626         old_state = p->state;
627         if (old_state & state) {
628                 if (!p->array) {
629                         /*
630                          * Fast-migrate the task if it's not running or runnable
631                          * currently. Do not violate hard affinity.
632                          */
633                         if (unlikely(sync && !task_running(rq, p) &&
634                                 (task_cpu(p) != smp_processor_id()) &&
635                                 cpu_isset(smp_processor_id(), p->cpus_allowed))) {
636 
637                                 set_task_cpu(p, smp_processor_id());
638                                 task_rq_unlock(rq, &flags);
639                                 goto repeat_lock_task;
640                         }
641                         if (old_state == TASK_UNINTERRUPTIBLE){
642                                 rq->nr_uninterruptible--;
643                                 /*
644                                  * Tasks on involuntary sleep don't earn
645                                  * sleep_avg beyond just interactive state.
646                                  */
647                                 p->activated = -1;
648                         }
649                         if (sync && (task_cpu(p) == smp_processor_id()))
650                                 __activate_task(p, rq);
651                         else {
652                                 activate_task(p, rq);
653                                 if (TASK_PREEMPTS_CURR(p, rq))
654                                         resched_task(rq->curr);
655                         }
656                         success = 1;
657                 }
658                 p->state = TASK_RUNNING;
659         }
660         task_rq_unlock(rq, &flags);
661 
662         return success;
663 }
664 int wake_up_process(task_t * p)
665 {
666         return try_to_wake_up(p, TASK_STOPPED | TASK_INTERRUPTIBLE | TASK_UNINTERRUPTIBLE, 0);
667 }
668 
669 EXPORT_SYMBOL(wake_up_process);
670 
671 int wake_up_state(task_t *p, unsigned int state)
672 {
673         return try_to_wake_up(p, state, 0);
674 }
675 
676 /*
677  * wake_up_forked_process - wake up a freshly forked process.
678  *
679  * This function will do some initial scheduler statistics housekeeping
680  * that must be done for every newly created process.
681  */
682 void wake_up_forked_process(task_t * p)
683 {
684         unsigned long flags;
685         runqueue_t *rq = task_rq_lock(current, &flags);
686 
687         p->state = TASK_RUNNING;
688         /*
689          * We decrease the sleep average of forking parents
690          * and children as well, to keep max-interactive tasks
691          * from forking tasks that are max-interactive.
692          */
693         current->sleep_avg = JIFFIES_TO_NS(CURRENT_BONUS(current) *
694                 PARENT_PENALTY / 100 * MAX_SLEEP_AVG / MAX_BONUS);
695 
696         p->sleep_avg = JIFFIES_TO_NS(CURRENT_BONUS(p) *
697                 CHILD_PENALTY / 100 * MAX_SLEEP_AVG / MAX_BONUS);
698 
699         p->interactive_credit = 0;
700 
701         p->prio = effective_prio(p);
702         set_task_cpu(p, smp_processor_id());
703 
704         if (unlikely(!current->array))
705                 __activate_task(p, rq);
706         else {
707                 p->prio = current->prio;
708                 list_add_tail(&p->run_list, &current->run_list);
709                 p->array = current->array;
710                 p->array->nr_active++;
711                 nr_running_inc(rq);
712         }
713         task_rq_unlock(rq, &flags);
714 }
715 
716 /*
717  * Potentially available exiting-child timeslices are
718  * retrieved here - this way the parent does not get
719  * penalized for creating too many threads.
720  *
721  * (this cannot be used to 'generate' timeslices
722  * artificially, because any timeslice recovered here
723  * was given away by the parent in the first place.)
724  */
725 void sched_exit(task_t * p)
726 {
727         unsigned long flags;
728 
729         local_irq_save(flags);
730         if (p->first_time_slice) {
731                 p->parent->time_slice += p->time_slice;
732                 if (unlikely(p->parent->time_slice > MAX_TIMESLICE))
733                         p->parent->time_slice = MAX_TIMESLICE;
734         }
735         local_irq_restore(flags);
736         /*
737          * If the child was a (relative-) CPU hog then decrease
738          * the sleep_avg of the parent as well.
739          */
740         if (p->sleep_avg < p->parent->sleep_avg)
741                 p->parent->sleep_avg = p->parent->sleep_avg /
742                 (EXIT_WEIGHT + 1) * EXIT_WEIGHT + p->sleep_avg /
743                 (EXIT_WEIGHT + 1);
744 }
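/*
 * With EXIT_WEIGHT == 3 the update above amounts to (ignoring integer
 * truncation):
 *
 *   parent->sleep_avg = 3/4 * parent->sleep_avg + 1/4 * p->sleep_avg
 *
 * so a CPU-hog child drags the parent's interactivity rating down, but
 * only by a quarter of the difference per exiting child.
 */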
745 
746 /**
747  * finish_task_switch - clean up after a task-switch
748  * @prev: the thread we just switched away from.
749  *
750  * We enter this with the runqueue still locked, and finish_arch_switch()
751  * will unlock it along with doing any other architecture-specific cleanup
752  * actions.
753  *
754  * Note that we may have delayed dropping an mm in context_switch(). If
755  * so, we finish that here outside of the runqueue lock.  (Doing it
756  * with the lock held can cause deadlocks; see schedule() for
757  * details.)
758  */
759 static inline void finish_task_switch(task_t *prev)
760 {
761         runqueue_t *rq = this_rq();
762         struct mm_struct *mm = rq->prev_mm;
763         unsigned long prev_task_flags;
764 
765         rq->prev_mm = NULL;
766 
767         /*
768          * A task struct has one reference for the use as "current".
769          * If a task dies, then it sets TASK_ZOMBIE in tsk->state and calls
770          * schedule one last time. The schedule call will never return,
771          * and the scheduled task must drop that reference.
772          * The test for TASK_ZOMBIE must occur while the runqueue locks are
773          * still held, otherwise prev could be scheduled on another cpu, die
774          * there before we look at prev->state, and then the reference would
775          * be dropped twice.
776          *              Manfred Spraul <manfred@colorfullife.com>
777          */
778         prev_task_flags = prev->flags;
779         finish_arch_switch(rq, prev);
780         if (mm)
781                 mmdrop(mm);
782         if (unlikely(prev_task_flags & PF_DEAD))
783                 put_task_struct(prev);
784 }
785 
786 /**
787  * schedule_tail - first thing a freshly forked thread must call.
788  * @prev: the thread we just switched away from.
789  */
790 asmlinkage void schedule_tail(task_t *prev)
791 {
792         finish_task_switch(prev);
793 
794         if (current->set_child_tid)
795                 put_user(current->pid, current->set_child_tid);
796 }
797 
798 /*
799  * context_switch - switch to the new MM and the new
800  * thread's register state.
801  */
802 static inline task_t * context_switch(runqueue_t *rq, task_t *prev, task_t *next)
803 {
804         struct mm_struct *mm = next->mm;
805         struct mm_struct *oldmm = prev->active_mm;
806 
807         if (unlikely(!mm)) {
808                 next->active_mm = oldmm;
809                 atomic_inc(&oldmm->mm_count);
810                 enter_lazy_tlb(oldmm, next);
811         } else
812                 switch_mm(oldmm, mm, next);
813 
814         if (unlikely(!prev->mm)) {
815                 prev->active_mm = NULL;
816                 WARN_ON(rq->prev_mm);
817                 rq->prev_mm = oldmm;
818         }
819 
820         /* Here we just switch the register state and the stack. */
821         switch_to(prev, next, prev);
822 
823         return prev;
824 }
825 
826 /*
827  * nr_running, nr_uninterruptible and nr_context_switches:
828  *
829  * externally visible scheduler statistics: current number of runnable
830  * threads, current number of uninterruptible-sleeping threads, total
831  * number of context switches performed since bootup.
832  */
833 unsigned long nr_running(void)
834 {
835         unsigned long i, sum = 0;
836 
837         for (i = 0; i < NR_CPUS; i++)
838                 sum += cpu_rq(i)->nr_running;
839 
840         return sum;
841 }
842 
843 unsigned long nr_uninterruptible(void)
844 {
845         unsigned long i, sum = 0;
846 
847         for (i = 0; i < NR_CPUS; i++) {
848                 if (!cpu_online(i))
849                         continue;
850                 sum += cpu_rq(i)->nr_uninterruptible;
851         }
852         return sum;
853 }
854 
855 unsigned long nr_context_switches(void)
856 {
857         unsigned long i, sum = 0;
858 
859         for (i = 0; i < NR_CPUS; i++) {
860                 if (!cpu_online(i))
861                         continue;
862                 sum += cpu_rq(i)->nr_switches;
863         }
864         return sum;
865 }
866 
867 unsigned long nr_iowait(void)
868 {
869         unsigned long i, sum = 0;
870 
871         for (i = 0; i < NR_CPUS; ++i) {
872                 if (!cpu_online(i))
873                         continue;
874                 sum += atomic_read(&cpu_rq(i)->nr_iowait);
875         }
876         return sum;
877 }
878 
879 /*
880  * double_rq_lock - safely lock two runqueues
881  *
882  * Note this does not disable interrupts like task_rq_lock,
883  * you need to do so manually before calling.
884  */
885 static inline void double_rq_lock(runqueue_t *rq1, runqueue_t *rq2)
886 {
887         if (rq1 == rq2)
888                 spin_lock(&rq1->lock);
889         else {
890                 if (rq1 < rq2) {
891                         spin_lock(&rq1->lock);
892                         spin_lock(&rq2->lock);
893                 } else {
894                         spin_lock(&rq2->lock);
895                         spin_lock(&rq1->lock);
896                 }
897         }
898 }
899 
900 /*
901  * double_rq_unlock - safely unlock two runqueues
902  *
903  * Note this does not restore interrupts like task_rq_unlock,
904  * you need to do so manually after calling.
905  */
906 static inline void double_rq_unlock(runqueue_t *rq1, runqueue_t *rq2)
907 {
908         spin_unlock(&rq1->lock);
909         if (rq1 != rq2)
910                 spin_unlock(&rq2->lock);
911 }
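/*
 * Usage sketch (illustrative): callers disable interrupts themselves,
 * and the ascending-address ordering inside double_rq_lock() is what
 * prevents two CPUs locking the same pair of runqueues from
 * deadlocking:
 *
 *   local_irq_disable();
 *   double_rq_lock(rq1, rq2);
 *   ...   move tasks between rq1 and rq2 ...
 *   double_rq_unlock(rq1, rq2);
 *   local_irq_enable();
 */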
912 
913 #ifdef CONFIG_NUMA
914 /*
915  * If dest_cpu is allowed for this process, migrate the task to it.
916  * This is accomplished by forcing the cpus_allowed mask to only
917  * allow dest_cpu, which will force the task onto dest_cpu.  Then
918  * the cpu_allowed mask is restored.
919  */
920 static void sched_migrate_task(task_t *p, int dest_cpu)
921 {
922         cpumask_t old_mask;
923 
924         old_mask = p->cpus_allowed;
925         if (!cpu_isset(dest_cpu, old_mask))
926                 return;
927         /* force the process onto the specified CPU */
928         set_cpus_allowed(p, cpumask_of_cpu(dest_cpu));
929 
930         /* restore the cpus allowed mask */
931         set_cpus_allowed(p, old_mask);
932 }
933 
934 /*
935  * Find the least loaded CPU.  Slightly favor the current CPU by
936  * setting its runqueue length as the minimum to start.
937  */
938 static int sched_best_cpu(struct task_struct *p)
939 {
940         int i, minload, load, best_cpu, node = 0;
941         cpumask_t cpumask;
942 
943         best_cpu = task_cpu(p);
944         if (cpu_rq(best_cpu)->nr_running <= 2)
945                 return best_cpu;
946 
947         minload = 10000000;
948         for_each_node_with_cpus(i) {
949                 /*
950                  * Node load is always divided by nr_cpus_node to normalise 
951                  * load values in case cpu count differs from node to node.
952                  * We first multiply node_nr_running by 10 to get a little
953                  * better resolution.   
954                  */
955                 load = 10 * atomic_read(&node_nr_running[i]) / nr_cpus_node(i);
956                 if (load < minload) {
957                         minload = load;
958                         node = i;
959                 }
960         }
961 
962         minload = 10000000;
963         cpumask = node_to_cpumask(node);
964         for (i = 0; i < NR_CPUS; ++i) {
965                 if (!cpu_isset(i, cpumask))
966                         continue;
967                 if (cpu_rq(i)->nr_running < minload) {
968                         best_cpu = i;
969                         minload = cpu_rq(i)->nr_running;
970                 }
971         }
972         return best_cpu;
973 }
974 
975 void sched_balance_exec(void)
976 {
977         int new_cpu;
978 
979         if (numnodes > 1) {
980                 new_cpu = sched_best_cpu(current);
981                 if (new_cpu != smp_processor_id())
982                         sched_migrate_task(current, new_cpu);
983         }
984 }
985 
986 /*
987  * Find the busiest node. All previous node loads contribute with a
988  * geometrically decaying weight to the load measure:
989  *      load_{t} = load_{t-1}/2 + nr_node_running_{t}
990  * This way sudden load peaks are flattened out a bit.
991  * Node load is divided by nr_cpus_node() in order to compare nodes
992  * of different cpu counts, and is also first multiplied by 10 to
993  * provide better resolution.
994  */
995 static int find_busiest_node(int this_node)
996 {
997         int i, node = -1, load, this_load, maxload;
998 
999         if (!nr_cpus_node(this_node))
1000                 return node;
1001         this_load = maxload = (this_rq()->prev_node_load[this_node] >> 1)
1002                 + (10 * atomic_read(&node_nr_running[this_node])
1003                 / nr_cpus_node(this_node));
1004         this_rq()->prev_node_load[this_node] = this_load;
1005         for_each_node_with_cpus(i) {
1006                 if (i == this_node)
1007                         continue;
1008                 load = (this_rq()->prev_node_load[i] >> 1)
1009                         + (10 * atomic_read(&node_nr_running[i])
1010                         / nr_cpus_node(i));
1011                 this_rq()->prev_node_load[i] = load;
1012                 if (load > maxload && (100*load > NODE_THRESHOLD*this_load)) {
1013                         maxload = load;
1014                         node = i;
1015                 }
1016         }
1017         return node;
1018 }
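/*
 * Worked example of the decaying node load above (illustrative): a node
 * with 4 CPUs, 8 running tasks and a previous load of 40 gets
 *
 *   load = 40/2 + 10 * 8 / 4 = 20 + 20 = 40
 *
 * and with NODE_THRESHOLD == 125 a remote node is only picked if
 * 100 * load > 125 * this_load, i.e. it is at least ~25% busier than
 * the local node.
 */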
1019 
1020 #endif /* CONFIG_NUMA */
1021 
1022 #ifdef CONFIG_SMP
1023 
1024 /*
1025  * double_lock_balance - lock the busiest runqueue
1026  *
1027  * this_rq is locked already. Recalculate nr_running if we have to
1028  * drop the runqueue lock.
1029  */
1030 static inline unsigned int double_lock_balance(runqueue_t *this_rq,
1031         runqueue_t *busiest, int this_cpu, int idle, unsigned int nr_running)
1032 {
1033         if (unlikely(!spin_trylock(&busiest->lock))) {
1034                 if (busiest < this_rq) {
1035                         spin_unlock(&this_rq->lock);
1036                         spin_lock(&busiest->lock);
1037                         spin_lock(&this_rq->lock);
1038                         /* Need to recalculate nr_running */
1039                         if (idle || (this_rq->nr_running > this_rq->prev_cpu_load[this_cpu]))
1040                                 nr_running = this_rq->nr_running;
1041                         else
1042                                 nr_running = this_rq->prev_cpu_load[this_cpu];
1043                 } else
1044                         spin_lock(&busiest->lock);
1045         }
1046         return nr_running;
1047 }
1048 
1049 /*
1050  * find_busiest_queue - find the busiest runqueue among the cpus in cpumask.
1051  */
1052 static inline runqueue_t *find_busiest_queue(runqueue_t *this_rq, int this_cpu, int idle, int *imbalance, cpumask_t cpumask)
1053 {
1054         int nr_running, load, max_load, i;
1055         runqueue_t *busiest, *rq_src;
1056 
1057         /*
1058          * We search all runqueues to find the most busy one.
1059          * We do this lockless to reduce cache-bouncing overhead,
1060          * we re-check the 'best' source CPU later on again, with
1061          * the lock held.
1062          *
1063          * We fend off statistical fluctuations in runqueue lengths by
1064          * saving the runqueue length during the previous load-balancing
1065          * operation and using the smaller of the current and saved lengths.
1066          * Only if a runqueue has been consistently long do we recognize
1067          * it and pull tasks from it.
1068          *
1069          * The 'current runqueue length' is a statistical maximum variable,
1070          * for that one we take the longer one - to avoid fluctuations in
1071          * the other direction. So for a load-balance to happen it needs
1072          * stable long runqueue on the target CPU and stable short runqueue
1073          * on the local runqueue.
1074          *
1075          * We make an exception if this CPU is about to become idle - in
1076          * that case we are less picky about moving a task across CPUs and
1077          * take what can be taken.
1078          */
1079         if (idle || (this_rq->nr_running > this_rq->prev_cpu_load[this_cpu]))
1080                 nr_running = this_rq->nr_running;
1081         else
1082                 nr_running = this_rq->prev_cpu_load[this_cpu];
1083 
1084         busiest = NULL;
1085         max_load = 1;
1086         for (i = 0; i < NR_CPUS; i++) {
1087                 if (!cpu_isset(i, cpumask))
1088                         continue;
1089 
1090                 rq_src = cpu_rq(i);
1091                 if (idle || (rq_src->nr_running < this_rq->prev_cpu_load[i]))
1092                         load = rq_src->nr_running;
1093                 else
1094                         load = this_rq->prev_cpu_load[i];
1095                 this_rq->prev_cpu_load[i] = rq_src->nr_running;
1096 
1097                 if ((load > max_load) && (rq_src != this_rq)) {
1098                         busiest = rq_src;
1099                         max_load = load;
1100                 }
1101         }
1102 
1103         if (likely(!busiest))
1104                 goto out;
1105 
1106         *imbalance = max_load - nr_running;
1107 
1108         /* It needs at least a ~25% imbalance to trigger balancing. */
1109         if (!idle && ((*imbalance)*4 < max_load)) {
1110                 busiest = NULL;
1111                 goto out;
1112         }
1113 
1114         nr_running = double_lock_balance(this_rq, busiest, this_cpu, idle, nr_running);
1115         /*
1116          * Make sure nothing changed since we checked the
1117          * runqueue length.
1118          */
1119         if (busiest->nr_running <= nr_running) {
1120                 spin_unlock(&busiest->lock);
1121                 busiest = NULL;
1122         }
1123 out:
1124         return busiest;
1125 }
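/*
 * Example (illustrative): with 2 tasks running locally and 6 on the
 * busiest CPU, *imbalance == 4 and 4 * 4 >= 6, so balancing proceeds;
 * load_balance() below then halves the imbalance and moves at most 2
 * tasks, to avoid simply shifting the imbalance over to this CPU.
 */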
1126 
1127 /*
1128  * pull_task - move a task from a remote runqueue to the local runqueue.
1129  * Both runqueues must be locked.
1130  */
1131 static inline void pull_task(runqueue_t *src_rq, prio_array_t *src_array, task_t *p, runqueue_t *this_rq, int this_cpu)
1132 {
1133         dequeue_task(p, src_array);
1134         nr_running_dec(src_rq);
1135         set_task_cpu(p, this_cpu);
1136         nr_running_inc(this_rq);
1137         enqueue_task(p, this_rq->active);
1138         /*
1139          * Note that idle threads have a prio of MAX_PRIO, so this test
1140          * is always true for them.
1141          */
1142         if (TASK_PREEMPTS_CURR(p, this_rq))
1143                 set_need_resched();
1144 }
1145 
1146 /*
1147  * Previously:
1148  *
1149  * #define CAN_MIGRATE_TASK(p,rq,this_cpu)      \
1150  *      ((!idle || (NS_TO_JIFFIES(now - (p)->timestamp) > \
1151  *              cache_decay_ticks)) && !task_running(rq, p) && \
1152  *                      cpu_isset(this_cpu, (p)->cpus_allowed))
1153  */
1154 
1155 static inline int
1156 can_migrate_task(task_t *tsk, runqueue_t *rq, int this_cpu, int idle)
1157 {
1158         unsigned long delta = sched_clock() - tsk->timestamp;
1159 
1160         if (!idle && (delta <= JIFFIES_TO_NS(cache_decay_ticks)))
1161                 return 0;
1162         if (task_running(rq, tsk))
1163                 return 0;
1164         if (!cpu_isset(this_cpu, tsk->cpus_allowed))
1165                 return 0;
1166         return 1;
1167 }
1168 
1169 /*
1170  * Current runqueue is empty, or rebalance tick: if there is an
1171  * imbalance (current runqueue is too short) then pull from
1172  * busiest runqueue(s).
1173  *
1174  * We call this with the current runqueue locked,
1175  * irqs disabled.
1176  */
1177 static void load_balance(runqueue_t *this_rq, int idle, cpumask_t cpumask)
1178 {
1179         int imbalance, idx, this_cpu = smp_processor_id();
1180         runqueue_t *busiest;
1181         prio_array_t *array;
1182         struct list_head *head, *curr;
1183         task_t *tmp;
1184 
1185         busiest = find_busiest_queue(this_rq, this_cpu, idle, &imbalance, cpumask);
1186         if (!busiest)
1187                 goto out;
1188 
1189         /*
1190          * We only want to steal a number of tasks equal to 1/2 the imbalance,
1191          * otherwise we'll just shift the imbalance to the new queue:
1192          */
1193         imbalance /= 2;
1194 
1195         /*
1196          * We first consider expired tasks. Those will likely not be
1197          * executed in the near future, and they are most likely to
1198          * be cache-cold, thus switching CPUs has the least effect
1199          * on them.
1200          */
1201         if (busiest->expired->nr_active)
1202                 array = busiest->expired;
1203         else
1204                 array = busiest->active;
1205 
1206 new_array:
1207         /* Start searching at priority 0: */
1208         idx = 0;
1209 skip_bitmap:
1210         if (!idx)
1211                 idx = sched_find_first_bit(array->bitmap);
1212         else
1213                 idx = find_next_bit(array->bitmap, MAX_PRIO, idx);
1214         if (idx >= MAX_PRIO) {
1215                 if (array == busiest->expired) {
1216                         array = busiest->active;
1217                         goto new_array;
1218                 }
1219                 goto out_unlock;
1220         }
1221 
1222         head = array->queue + idx;
1223         curr = head->prev;
1224 skip_queue:
1225         tmp = list_entry(curr, task_t, run_list);
1226 
1227         /*
1228          * We do not migrate tasks that are:
1229          * 1) running (obviously), or
1230          * 2) cannot be migrated to this CPU due to cpus_allowed, or
1231          * 3) are cache-hot on their current CPU.
1232          */
1233 
1234         curr = curr->prev;
1235 
1236         if (!can_migrate_task(tmp, busiest, this_cpu, idle)) {
1237                 if (curr != head)
1238                         goto skip_queue;
1239                 idx++;
1240                 goto skip_bitmap;
1241         }
1242         pull_task(busiest, array, tmp, this_rq, this_cpu);
1243         if (!idle && --imbalance) {
1244                 if (curr != head)
1245                         goto skip_queue;
1246                 idx++;
1247                 goto skip_bitmap;
1248         }
1249 out_unlock:
1250         spin_unlock(&busiest->lock);
1251 out:
1252         ;
1253 }
1254 
1255 /*
1256  * One of the idle_cpu_tick() and busy_cpu_tick() functions will
1257  * get called every timer tick, on every CPU. Our balancing action
1258  * frequency and balancing aggressiveness depend on whether the CPU is
1259  * idle or not.
1260  *
1261  * busy-rebalance every 200 msecs. idle-rebalance every 1 msec. (or on
1262  * systems with HZ=100, every 10 msecs.)
1263  *
1264  * On NUMA, do a node-rebalance every 400 msecs.
1265  */
1266 #define IDLE_REBALANCE_TICK (HZ/1000 ?: 1)
1267 #define BUSY_REBALANCE_TICK (HZ/5 ?: 1)
1268 #define IDLE_NODE_REBALANCE_TICK (IDLE_REBALANCE_TICK * 5)
1269 #define BUSY_NODE_REBALANCE_TICK (BUSY_REBALANCE_TICK * 2)
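/*
 * For illustration, with HZ == 1000 this means: an idle CPU tries to
 * pull work every jiffy (1 ms) and does a node-level rebalance every
 * 5 ms, while a busy CPU rebalances every 200 ms and across nodes every
 * 400 ms.  With HZ == 100, HZ/1000 evaluates to 0 and the "?: 1"
 * fallback makes the idle rebalance run every tick (10 ms), as noted in
 * the comment above.
 */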
1270 
1271 #ifdef CONFIG_NUMA
1272 static void balance_node(runqueue_t *this_rq, int idle, int this_cpu)
1273 {
1274         int node = find_busiest_node(cpu_to_node(this_cpu));
1275 
1276         if (node >= 0) {
1277                 cpumask_t cpumask = node_to_cpumask(node);
1278                 cpu_set(this_cpu, cpumask);
1279                 spin_lock(&this_rq->lock);
1280                 load_balance(this_rq, idle, cpumask);
1281                 spin_unlock(&this_rq->lock);
1282         }
1283 }
1284 #endif
1285 
1286 static void rebalance_tick(runqueue_t *this_rq, int idle)
1287 {
1288 #ifdef CONFIG_NUMA
1289         int this_cpu = smp_processor_id();
1290 #endif
1291         unsigned long j = jiffies;
1292 
1293         /*
1294          * First do inter-node rebalancing, then intra-node rebalancing,
1295          * if both events happen in the same tick. The inter-node
1296          * rebalancing does not necessarily have to create a perfect
1297          * balance within the node, since we load-balance the most loaded
1298          * node with the current CPU. (ie. other CPUs in the local node
1299          * are not balanced.)
1300          */
1301         if (idle) {
1302 #ifdef CONFIG_NUMA
1303                 if (!(j % IDLE_NODE_REBALANCE_TICK))
1304                         balance_node(this_rq, idle, this_cpu);
1305 #endif
1306                 if (!(j % IDLE_REBALANCE_TICK)) {
1307                         spin_lock(&this_rq->lock);
1308                         load_balance(this_rq, idle, cpu_to_node_mask(this_cpu));
1309                         spin_unlock(&this_rq->lock);
1310                 }
1311                 return;
1312         }
1313 #ifdef CONFIG_NUMA
1314         if (!(j % BUSY_NODE_REBALANCE_TICK))
1315                 balance_node(this_rq, idle, this_cpu);
1316 #endif
1317         if (!(j % BUSY_REBALANCE_TICK)) {
1318                 spin_lock(&this_rq->lock);
1319                 load_balance(this_rq, idle, cpu_to_node_mask(this_cpu));
1320                 spin_unlock(&this_rq->lock);
1321         }
1322 }
1323 #else
1324 /*
1325  * on UP we do not need to balance between CPUs:
1326  */
1327 static inline void rebalance_tick(runqueue_t *this_rq, int idle)
1328 {
1329 }
1330 #endif
1331 
1332 DEFINE_PER_CPU(struct kernel_stat, kstat) = { { 0 } };
1333 
1334 EXPORT_PER_CPU_SYMBOL(kstat);
1335 
1336 /*
1337  * We place interactive tasks back into the active array, if possible.
1338  *
1339  * To guarantee that this does not starve expired tasks we ignore the
1340  * interactivity of a task if the first expired task had to wait more
1341  * than a 'reasonable' amount of time. This deadline timeout is
1342  * load-dependent, as the frequency of array switched decreases with
1343  * load-dependent, as the frequency of array switches decreases with
1344  */
1345 #define EXPIRED_STARVING(rq) \
1346                 (STARVATION_LIMIT && ((rq)->expired_timestamp && \
1347                 (jiffies - (rq)->expired_timestamp >= \
1348                         STARVATION_LIMIT * ((rq)->nr_running) + 1)))
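/*
 * Example (illustrative, HZ == 1000, so STARVATION_LIMIT ~= 1020
 * jiffies): with 4 runnable tasks the interactivity bonus is ignored
 * once the first expired task has been waiting more than roughly
 * 4 seconds, after which expiring tasks always go to the expired array.
 */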
1349 
1350 /*
1351  * This function gets called by the timer code, with HZ frequency.
1352  * We call it with interrupts disabled.
1353  *
1354  * It also gets called by the fork code, when changing the parent's
1355  * timeslices.
1356  */
1357 void scheduler_tick(int user_ticks, int sys_ticks)
1358 {
1359         int cpu = smp_processor_id();
1360         struct cpu_usage_stat *cpustat = &kstat_this_cpu.cpustat;
1361         runqueue_t *rq = this_rq();
1362         task_t *p = current;
1363 
1364         if (rcu_pending(cpu))
1365                 rcu_check_callbacks(cpu, user_ticks);
1366 
1367         /* note: this timer irq context must be accounted for as well */
1368         if (hardirq_count() - HARDIRQ_OFFSET) {
1369                 cpustat->irq += sys_ticks;
1370                 sys_ticks = 0;
1371         } else if (softirq_count()) {
1372                 cpustat->softirq += sys_ticks;
1373                 sys_ticks = 0;
1374         }
1375 
1376         if (p == rq->idle) {
1377                 if (atomic_read(&rq->nr_iowait) > 0)
1378                         cpustat->iowait += sys_ticks;
1379                 else
1380                         cpustat->idle += sys_ticks;
1381                 rebalance_tick(rq, 1);
1382                 return;
1383         }
1384         if (TASK_NICE(p) > 0)
1385                 cpustat->nice += user_ticks;
1386         else
1387                 cpustat->user += user_ticks;
1388         cpustat->system += sys_ticks;
1389 
1390         /* Task might have expired already, but not scheduled off yet */
1391         if (p->array != rq->active) {
1392                 set_tsk_need_resched(p);
1393                 goto out;
1394         }
1395         spin_lock(&rq->lock);
1396         /*
1397          * The task was running during this tick - update the
1398          * time slice counter. Note: we do not update a thread's
1399          * priority until it either goes to sleep or uses up its
1400          * timeslice. This makes it possible for interactive tasks
1401          * to use up their timeslices at their highest priority levels.
1402          */
1403         if (unlikely(rt_task(p))) {
1404                 /*
1405                  * RR tasks need a special form of timeslice management.
1406                  * FIFO tasks have no timeslices.
1407                  */
1408                 if ((p->policy == SCHED_RR) && !--p->time_slice) {
1409                         p->time_slice = task_timeslice(p);
1410                         p->first_time_slice = 0;
1411                         set_tsk_need_resched(p);
1412 
1413                         /* put it at the end of the queue: */
1414                         dequeue_task(p, rq->active);
1415                         enqueue_task(p, rq->active);
1416                 }
1417                 goto out_unlock;
1418         }
1419         if (!--p->time_slice) {
1420                 dequeue_task(p, rq->active);
1421                 set_tsk_need_resched(p);
1422                 p->prio = effective_prio(p);
1423                 p->time_slice = task_timeslice(p);
1424                 p->first_time_slice = 0;
1425 
1426                 if (!rq->expired_timestamp)
1427                         rq->expired_timestamp = jiffies;
1428                 if (!TASK_INTERACTIVE(p) || EXPIRED_STARVING(rq)) {
1429                         enqueue_task(p, rq->expired);
1430                 } else
1431                         enqueue_task(p, rq->active);
1432         } else {
1433                 /*
1434                  * Prevent too long a timeslice from allowing a task to monopolize
1435                  * the CPU. We do this by splitting up the timeslice into
1436                  * smaller pieces.
1437                  *
1438                  * Note: this does not mean the task's timeslices expire or
1439                  * get lost in any way, they just might be preempted by
1440                  * another task of equal priority. (one with higher
1441                  * priority would have preempted this task already.) We
1442                  * requeue this task to the end of the list on this priority
1443                  * level, which is in essence a round-robin of tasks with
1444                  * equal priority.
1445                  *
1446                  * This only applies to tasks in the interactive
1447                  * delta range with at least TIMESLICE_GRANULARITY to requeue.
1448                  */
1449                 if (TASK_INTERACTIVE(p) && !((task_timeslice(p) -
1450                         p->time_slice) % TIMESLICE_GRANULARITY(p)) &&
1451                         (p->time_slice >= TIMESLICE_GRANULARITY(p)) &&
1452                         (p->array == rq->active)) {
1453 
1454                         dequeue_task(p, rq->active);
1455                         set_tsk_need_resched(p);
1456                         p->prio = effective_prio(p);
1457                         enqueue_task(p, rq->active);
1458                 }
1459         }
1460 out_unlock:
1461         spin_unlock(&rq->lock);
1462 out:
1463         rebalance_tick(rq, 0);
1464 }
1465 
1466 void scheduling_functions_start_here(void) { }
1467 
1468 /*
1469  * schedule() is the main scheduler function.
1470  */
1471 asmlinkage void schedule(void)
1472 {
1473         task_t *prev, *next;
1474         runqueue_t *rq;
1475         prio_array_t *array;
1476         struct list_head *queue;
1477         unsigned long long now;
1478         unsigned long run_time;
1479         int idx;
1480 
1481         /*
1482          * Test if we are atomic.  Since do_exit() needs to call into
1483          * schedule() atomically, we ignore that path for now.
1484          * Otherwise, whine if we are scheduling when we should not be.
1485          */
1486         if (likely(!(current->state & (TASK_DEAD | TASK_ZOMBIE)))) {
1487                 if (unlikely(in_atomic())) {
1488                         printk(KERN_ERR "bad: scheduling while atomic!\n");
1489                         dump_stack();
1490                 }
1491         }
1492 
1493 need_resched:
1494         preempt_disable();
1495         prev = current;
1496         rq = this_rq();
1497 
1498         release_kernel_lock(prev);
1499         now = sched_clock();
1500         if (likely(now - prev->timestamp < NS_MAX_SLEEP_AVG))
1501                 run_time = now - prev->timestamp;
1502         else
1503                 run_time = NS_MAX_SLEEP_AVG;
1504 
1505         /*
1506          * Tasks with interactive credits get charged less run_time
1507          * at high sleep_avg to delay them losing their interactive
1508          * status.
1509          */
1510         if (HIGH_CREDIT(prev))
1511                 run_time /= (CURRENT_BONUS(prev) ? : 1);
1512 
1513         spin_lock_irq(&rq->lock);
1514 
1515         /*
1516          * if entering off of a kernel preemption go straight
1517          * to picking the next task.
1518          */
1519         if (unlikely(preempt_count() & PREEMPT_ACTIVE))
1520                 goto pick_next_task;
1521 
1522         switch (prev->state) {
1523         case TASK_INTERRUPTIBLE:
1524                 if (unlikely(signal_pending(prev))) {
1525                         prev->state = TASK_RUNNING;
1526                         break;
1527                 }
1528         default:
1529                 deactivate_task(prev, rq);
1530                 prev->nvcsw++;
1531                 break;
1532         case TASK_RUNNING:
1533                 prev->nivcsw++;
1534         }
1535 pick_next_task:
1536         if (unlikely(!rq->nr_running)) {
1537 #ifdef CONFIG_SMP
1538                 load_balance(rq, 1, cpu_to_node_mask(smp_processor_id()));
1539                 if (rq->nr_running)
1540                         goto pick_next_task;
1541 #endif
1542                 next = rq->idle;
1543                 rq->expired_timestamp = 0;
1544                 goto switch_tasks;
1545         }
1546 
1547         array = rq->active;
1548         if (unlikely(!array->nr_active)) {
1549                 /*
1550                  * Switch the active and expired arrays.
1551                  */
1552                 rq->active = rq->expired;
1553                 rq->expired = array;
1554                 array = rq->active;
1555                 rq->expired_timestamp = 0;
1556         }
1557 
1558         idx = sched_find_first_bit(array->bitmap);
1559         queue = array->queue + idx;
1560         next = list_entry(queue->next, task_t, run_list);
1561 
1562         if (next->activated > 0) {
1563                 unsigned long long delta = now - next->timestamp;
1564 
1565                 if (next->activated == 1)
1566                         delta = delta * (ON_RUNQUEUE_WEIGHT * 128 / 100) / 128;
1567 
1568                 array = next->array;
1569                 dequeue_task(next, array);
1570                 recalc_task_prio(next, next->timestamp + delta);
1571                 enqueue_task(next, array);
1572         }
1573         next->activated = 0;
1574 switch_tasks:
1575         prefetch(next);
1576         clear_tsk_need_resched(prev);
1577         RCU_qsctr(task_cpu(prev))++;
1578 
1579         prev->sleep_avg -= run_time;
1580         if ((long)prev->sleep_avg <= 0){
1581                 prev->sleep_avg = 0;
1582                 if (!(HIGH_CREDIT(prev) || LOW_CREDIT(prev)))
1583                         prev->interactive_credit--;
1584         }
1585         prev->timestamp = now;
1586 
1587         if (likely(prev != next)) {
1588                 next->timestamp = now;
1589                 rq->nr_switches++;
1590                 rq->curr = next;
1591 
1592                 prepare_arch_switch(rq, next);
1593                 prev = context_switch(rq, prev, next);
1594                 barrier();
1595 
1596                 finish_task_switch(prev);
1597         } else
1598                 spin_unlock_irq(&rq->lock);
1599 
1600         reacquire_kernel_lock(current);
1601         preempt_enable_no_resched();
1602         if (test_thread_flag(TIF_NEED_RESCHED))
1603                 goto need_resched;
1604 }
1605 
1606 EXPORT_SYMBOL(schedule);
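
/*
 * Example usage (illustrative sketch, not part of the original file): the
 * canonical way for kernel code to call schedule() is to put itself on a
 * wait queue, mark itself sleeping, re-test its condition and only then
 * sleep, so that a wakeup between the test and schedule() is not lost.
 * 'example_waitq', 'example_condition' and the function name are
 * hypothetical.
 */
static DECLARE_WAIT_QUEUE_HEAD(example_waitq);
static int example_condition;

static void example_wait_for_condition(void)
{
        DECLARE_WAITQUEUE(wait, current);

        add_wait_queue(&example_waitq, &wait);
        for (;;) {
                /* set the state before testing the condition */
                set_current_state(TASK_INTERRUPTIBLE);
                if (example_condition)
                        break;
                schedule();
        }
        set_current_state(TASK_RUNNING);
        remove_wait_queue(&example_waitq, &wait);
}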
1607 
1608 #ifdef CONFIG_PREEMPT
1609 /*
1610  * this is the entry point to schedule() from in-kernel preemption
1611  * off of preempt_enable().  Kernel preemption off the return-from-interrupt
1612  * path is handled in the architecture entry code, which calls schedule() directly.
1613  */
1614 asmlinkage void preempt_schedule(void)
1615 {
1616         struct thread_info *ti = current_thread_info();
1617 
1618         /*
1619          * If there is a non-zero preempt_count or interrupts are disabled,
1620          * we do not want to preempt the current task.  Just return..
1621          */
1622         if (unlikely(ti->preempt_count || irqs_disabled()))
1623                 return;
1624 
1625 need_resched:
1626         ti->preempt_count = PREEMPT_ACTIVE;
1627         schedule();
1628         ti->preempt_count = 0;
1629 
1630         /* we could miss a preemption opportunity between schedule and now */
1631         barrier();
1632         if (unlikely(test_thread_flag(TIF_NEED_RESCHED)))
1633                 goto need_resched;
1634 }
1635 
1636 EXPORT_SYMBOL(preempt_schedule);
1637 #endif /* CONFIG_PREEMPT */
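
/*
 * Example usage (illustrative sketch, not part of the original file):
 * CPU-local data is commonly protected by disabling preemption; when
 * preempt_enable() drops the count back to zero and TIF_NEED_RESCHED is
 * set, it ends up in preempt_schedule() on CONFIG_PREEMPT kernels.
 * 'example_cpu_counter' is a hypothetical array.
 */
static int example_cpu_counter[NR_CPUS];

static void example_touch_cpu_local_data(void)
{
        preempt_disable();      /* we cannot be moved to another CPU now */
        example_cpu_counter[smp_processor_id()]++;
        preempt_enable();       /* may reschedule if a preemption was pending */
}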
1638 
1639 int default_wake_function(wait_queue_t *curr, unsigned mode, int sync)
1640 {
1641         task_t *p = curr->task;
1642         return try_to_wake_up(p, mode, sync);
1643 }
1644 
1645 EXPORT_SYMBOL(default_wake_function);
1646 
1647 /*
1648  * The core wakeup function.  Non-exclusive wakeups (nr_exclusive == 0) just
1649  * wake everything up.  If it's an exclusive wakeup (nr_exclusive == small +ve
1650  * number) then we wake all the non-exclusive tasks and one exclusive task.
1651  *
1652  * There are circumstances in which we can try to wake a task which has already
1653  * started to run but is not in state TASK_RUNNING.  try_to_wake_up() returns
1654  * zero in this (rare) case, and we handle it by continuing to scan the queue.
1655  */
1656 static void __wake_up_common(wait_queue_head_t *q, unsigned int mode, int nr_exclusive, int sync)
1657 {
1658         struct list_head *tmp, *next;
1659 
1660         list_for_each_safe(tmp, next, &q->task_list) {
1661                 wait_queue_t *curr;
1662                 unsigned flags;
1663                 curr = list_entry(tmp, wait_queue_t, task_list);
1664                 flags = curr->flags;
1665                 if (curr->func(curr, mode, sync) &&
1666                     (flags & WQ_FLAG_EXCLUSIVE) &&
1667                     !--nr_exclusive)
1668                         break;
1669         }
1670 }
1671 
1672 /**
1673  * __wake_up - wake up threads blocked on a waitqueue.
1674  * @q: the waitqueue
1675  * @mode: which threads
1676  * @nr_exclusive: how many wake-one or wake-many threads to wake up
1677  */
1678 void __wake_up(wait_queue_head_t *q, unsigned int mode, int nr_exclusive)
1679 {
1680         unsigned long flags;
1681 
1682         spin_lock_irqsave(&q->lock, flags);
1683         __wake_up_common(q, mode, nr_exclusive, 0);
1684         spin_unlock_irqrestore(&q->lock, flags);
1685 }
1686 
1687 EXPORT_SYMBOL(__wake_up);
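
/*
 * Example usage (illustrative sketch, not part of the original file): the
 * waker side of the wait-queue pattern, using the hypothetical
 * 'example_waitq'/'example_condition' from the sketch after schedule()
 * above.  wake_up() wakes all non-exclusive waiters plus at most one
 * exclusive waiter; wake_up_all() wakes every waiter regardless.
 */
static void example_signal_condition(void)
{
        example_condition = 1;
        wake_up(&example_waitq);
}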
1688 
1689 /*
1690  * Same as __wake_up but called with the spinlock in wait_queue_head_t held.
1691  */
1692 void __wake_up_locked(wait_queue_head_t *q, unsigned int mode)
1693 {
1694         __wake_up_common(q, mode, 1, 0);
1695 }
1696 
1697 /**
1698  * __wake_up_sync - wake up threads blocked on a waitqueue.
1699  * @q: the waitqueue
1700  * @mode: which threads
1701  * @nr_exclusive: how many wake-one or wake-many threads to wake up
1702  *
1703  * The sync wakeup differs in that the waker knows that it will schedule
1704  * away soon, so while the target thread will be woken up, it will not
1705  * be migrated to another CPU - i.e. the two threads are 'synchronized'
1706  * with each other. This can prevent needless bouncing between CPUs.
1707  *
1708  * On UP it can prevent extra preemption.
1709  */
1710 void __wake_up_sync(wait_queue_head_t *q, unsigned int mode, int nr_exclusive)
1711 {
1712         unsigned long flags;
1713 
1714         if (unlikely(!q))
1715                 return;
1716 
1717         spin_lock_irqsave(&q->lock, flags);
1718         if (likely(nr_exclusive))
1719                 __wake_up_common(q, mode, nr_exclusive, 1);
1720         else
1721                 __wake_up_common(q, mode, nr_exclusive, 0);
1722         spin_unlock_irqrestore(&q->lock, flags);
1723 }
1724 
1725 EXPORT_SYMBOL_GPL(__wake_up_sync);      /* For internal use only */
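
/*
 * Example usage (illustrative sketch, not part of the original file): a
 * waker that knows it is about to schedule() away - a pipe writer that has
 * just filled the buffer, say - can use the sync variant so the woken task
 * is not pointlessly pushed to another CPU.  Hypothetical names as in the
 * earlier sketches.
 */
static void example_sync_wakeup(void)
{
        example_condition = 1;
        /* we will block or schedule() very soon ourselves: */
        wake_up_interruptible_sync(&example_waitq);
}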
1726 
1727 void complete(struct completion *x)
1728 {
1729         unsigned long flags;
1730 
1731         spin_lock_irqsave(&x->wait.lock, flags);
1732         x->done++;
1733         __wake_up_common(&x->wait, TASK_UNINTERRUPTIBLE | TASK_INTERRUPTIBLE, 1, 0);
1734         spin_unlock_irqrestore(&x->wait.lock, flags);
1735 }
1736 
1737 EXPORT_SYMBOL(complete);
1738 
1739 void complete_all(struct completion *x)
1740 {
1741         unsigned long flags;
1742 
1743         spin_lock_irqsave(&x->wait.lock, flags);
1744         x->done += UINT_MAX/2;
1745         __wake_up_common(&x->wait, TASK_UNINTERRUPTIBLE | TASK_INTERRUPTIBLE, 0, 0);
1746         spin_unlock_irqrestore(&x->wait.lock, flags);
1747 }
1748 
1749 void wait_for_completion(struct completion *x)
1750 {
1751         might_sleep();
1752         spin_lock_irq(&x->wait.lock);
1753         if (!x->done) {
1754                 DECLARE_WAITQUEUE(wait, current);
1755 
1756                 wait.flags |= WQ_FLAG_EXCLUSIVE;
1757                 __add_wait_queue_tail(&x->wait, &wait);
1758                 do {
1759                         __set_current_state(TASK_UNINTERRUPTIBLE);
1760                         spin_unlock_irq(&x->wait.lock);
1761                         schedule();
1762                         spin_lock_irq(&x->wait.lock);
1763                 } while (!x->done);
1764                 __remove_wait_queue(&x->wait, &wait);
1765         }
1766         x->done--;
1767         spin_unlock_irq(&x->wait.lock);
1768 }
1769 
1770 EXPORT_SYMBOL(wait_for_completion);
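
/*
 * Example usage (illustrative sketch, not part of the original file): the
 * classic use of a completion is one context sleeping until another context
 * reports that some piece of work is finished.  'example_done' and the
 * function names are hypothetical.
 */
static DECLARE_COMPLETION(example_done);

static void example_worker(void)
{
        /* ... do the actual work ... */
        complete(&example_done);        /* wakes exactly one waiter */
}

static void example_start_and_wait(void)
{
        /* hand example_worker() off to another thread or to irq context */
        wait_for_completion(&example_done);     /* sleeps uninterruptibly */
}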
1771 
1772 #define SLEEP_ON_VAR                            \
1773         unsigned long flags;                    \
1774         wait_queue_t wait;                      \
1775         init_waitqueue_entry(&wait, current);
1776 
1777 #define SLEEP_ON_HEAD                                   \
1778         spin_lock_irqsave(&q->lock,flags);              \
1779         __add_wait_queue(q, &wait);                     \
1780         spin_unlock(&q->lock);
1781 
1782 #define SLEEP_ON_TAIL                                           \
1783         spin_lock_irq(&q->lock);                                \
1784         __remove_wait_queue(q, &wait);                          \
1785         spin_unlock_irqrestore(&q->lock, flags);
1786 
1787 void interruptible_sleep_on(wait_queue_head_t *q)
1788 {
1789         SLEEP_ON_VAR
1790 
1791         current->state = TASK_INTERRUPTIBLE;
1792 
1793         SLEEP_ON_HEAD
1794         schedule();
1795         SLEEP_ON_TAIL
1796 }
1797 
1798 EXPORT_SYMBOL(interruptible_sleep_on);
1799 
1800 long interruptible_sleep_on_timeout(wait_queue_head_t *q, long timeout)
1801 {
1802         SLEEP_ON_VAR
1803 
1804         current->state = TASK_INTERRUPTIBLE;
1805 
1806         SLEEP_ON_HEAD
1807         timeout = schedule_timeout(timeout);
1808         SLEEP_ON_TAIL
1809 
1810         return timeout;
1811 }
1812 
1813 EXPORT_SYMBOL(interruptible_sleep_on_timeout);
1814 
1815 void sleep_on(wait_queue_head_t *q)
1816 {
1817         SLEEP_ON_VAR
1818 
1819         current->state = TASK_UNINTERRUPTIBLE;
1820 
1821         SLEEP_ON_HEAD
1822         schedule();
1823         SLEEP_ON_TAIL
1824 }
1825 
1826 EXPORT_SYMBOL(sleep_on);
1827 
1828 long sleep_on_timeout(wait_queue_head_t *q, long timeout)
1829 {
1830         SLEEP_ON_VAR
1831 
1832         current->state = TASK_UNINTERRUPTIBLE;
1833 
1834         SLEEP_ON_HEAD
1835         timeout = schedule_timeout(timeout);
1836         SLEEP_ON_TAIL
1837 
1838         return timeout;
1839 }
1840 
1841 EXPORT_SYMBOL(sleep_on_timeout);
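
/*
 * Example usage (illustrative sketch, not part of the original file): the
 * sleep_on() family is legacy - the condition test and the sleep are not
 * atomic, so these calls are only safe when an outer lock (e.g. the BKL)
 * serializes sleeper and waker.  New code should prefer the wait-queue
 * pattern shown after schedule() above.  Hypothetical names as before.
 */
static long example_legacy_wait(void)
{
        long timeout = HZ;      /* give up after roughly one second */

        while (!example_condition && timeout && !signal_pending(current))
                timeout = interruptible_sleep_on_timeout(&example_waitq,
                                                         timeout);
        return timeout;
}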
1842 
1843 void scheduling_functions_end_here(void) { }
1844 
1845 void set_user_nice(task_t *p, long nice)
1846 {
1847         unsigned long flags;
1848         prio_array_t *array;
1849         runqueue_t *rq;
1850         int old_prio, new_prio, delta;
1851 
1852         if (TASK_NICE(p) == nice || nice < -20 || nice > 19)
1853                 return;
1854         /*
1855          * We have to be careful, if called from sys_setpriority(),
1856          * the task might be in the middle of scheduling on another CPU.
1857          */
1858         rq = task_rq_lock(p, &flags);
1859         /*
1860          * The RT priorities are set via setscheduler(), but we still
1861          * allow the 'normal' nice value to be set - but as expected
1862          * it wont have any effect on scheduling until the task is
1863          * it won't have any effect on scheduling until the task
1864          * becomes SCHED_NORMAL again:
1865         if (rt_task(p)) {
1866                 p->static_prio = NICE_TO_PRIO(nice);
1867                 goto out_unlock;
1868         }
1869         array = p->array;
1870         if (array)
1871                 dequeue_task(p, array);
1872 
1873         old_prio = p->prio;
1874         new_prio = NICE_TO_PRIO(nice);
1875         delta = new_prio - old_prio;
1876         p->static_prio = NICE_TO_PRIO(nice);
1877         p->prio += delta;
1878 
1879         if (array) {
1880                 enqueue_task(p, array);
1881                 /*
1882                  * If the task increased its priority or is running and
1883                  * lowered its priority, then reschedule its CPU:
1884                  */
1885                 if (delta < 0 || (delta > 0 && task_running(rq, p)))
1886                         resched_task(rq->curr);
1887         }
1888 out_unlock:
1889         task_rq_unlock(rq, &flags);
1890 }
1891 
1892 EXPORT_SYMBOL(set_user_nice);
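
/*
 * Example usage (illustrative sketch, not part of the original file): a
 * kernel thread that wants to run as a low-priority background task can
 * lower its own nice value; set_user_nice() recomputes static_prio and
 * requeues the task if it is currently on a runqueue.
 */
static void example_make_background(void)
{
        set_user_nice(current, 10);     /* nice +10: weaker claim on the CPU */
}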
1893 
1894 #ifndef __alpha__
1895 
1896 /*
1897  * sys_nice - change the priority of the current process.
1898  * @increment: priority increment
1899  *
1900  * sys_setpriority is a more generic, but much slower function that
1901  * does similar things.
1902  */
1903 asmlinkage long sys_nice(int increment)
1904 {
1905         int retval;
1906         long nice;
1907 
1908         /*
1909          *      Setpriority might change our priority at the same moment.
1910          *      We don't have to worry. Conceptually one call occurs first
1911          *      and we have a single winner.
1912          */
1913         if (increment < 0) {
1914                 if (!capable(CAP_SYS_NICE))
1915                         return -EPERM;
1916                 if (increment < -40)
1917                         increment = -40;
1918         }
1919         if (increment > 40)
1920                 increment = 40;
1921 
1922         nice = PRIO_TO_NICE(current->static_prio) + increment;
1923         if (nice < -20)
1924                 nice = -20;
1925         if (nice > 19)
1926                 nice = 19;
1927 
1928         retval = security_task_setnice(current, nice);
1929         if (retval)
1930                 return retval;
1931 
1932         set_user_nice(current, nice);
1933         return 0;
1934 }
1935 
1936 #endif
1937 
1938 /**
1939  * task_prio - return the priority value of a given task.
1940  * @p: the task in question.
1941  *
1942  * This is the priority value as seen by users in /proc.
1943  * RT tasks are offset by -200. Normal tasks are centered
1944  * around 0, value goes from -16 to +15.
1945  */
1946 int task_prio(task_t *p)
1947 {
1948         return p->prio - MAX_RT_PRIO;
1949 }
1950 
1951 /**
1952  * task_nice - return the nice value of a given task.
1953  * @p: the task in question.
1954  */
1955 int task_nice(task_t *p)
1956 {
1957         return TASK_NICE(p);
1958 }
1959 
1960 EXPORT_SYMBOL(task_nice);
1961 
1962 /**
1963  * idle_cpu - is a given cpu idle currently?
1964  * @cpu: the processor in question.
1965  */
1966 int idle_cpu(int cpu)
1967 {
1968         return cpu_curr(cpu) == cpu_rq(cpu)->idle;
1969 }
1970 
1971 EXPORT_SYMBOL_GPL(idle_cpu);
1972 
1973 /**
1974  * find_process_by_pid - find a process with a matching PID value.
1975  * @pid: the pid in question.
1976  */
1977 static inline task_t *find_process_by_pid(pid_t pid)
1978 {
1979         return pid ? find_task_by_pid(pid) : current;
1980 }
1981 
1982 /*
1983  * setscheduler - change the scheduling policy and/or RT priority of a thread.
1984  */
1985 static int setscheduler(pid_t pid, int policy, struct sched_param __user *param)
1986 {
1987         struct sched_param lp;
1988         int retval = -EINVAL;
1989         int oldprio;
1990         prio_array_t *array;
1991         unsigned long flags;
1992         runqueue_t *rq;
1993         task_t *p;
1994 
1995         if (!param || pid < 0)
1996                 goto out_nounlock;
1997 
1998         retval = -EFAULT;
1999         if (copy_from_user(&lp, param, sizeof(struct sched_param)))
2000                 goto out_nounlock;
2001 
2002         /*
2003          * We play safe to avoid deadlocks.
2004          */
2005         read_lock_irq(&tasklist_lock);
2006 
2007         p = find_process_by_pid(pid);
2008 
2009         retval = -ESRCH;
2010         if (!p)
2011                 goto out_unlock_tasklist;
2012 
2013         /*
2014          * To be able to change p->policy safely, the appropriate
2015          * runqueue lock must be held.
2016          */
2017         rq = task_rq_lock(p, &flags);
2018 
2019         if (policy < 0)
2020                 policy = p->policy;
2021         else {
2022                 retval = -EINVAL;
2023                 if (policy != SCHED_FIFO && policy != SCHED_RR &&
2024                                 policy != SCHED_NORMAL)
2025                         goto out_unlock;
2026         }
2027 
2028         /*
2029          * Valid priorities for SCHED_FIFO and SCHED_RR are
2030          * 1..MAX_USER_RT_PRIO-1, valid priority for SCHED_NORMAL is 0.
2031          */
2032         retval = -EINVAL;
2033         if (lp.sched_priority < 0 || lp.sched_priority > MAX_USER_RT_PRIO-1)
2034                 goto out_unlock;
2035         if ((policy == SCHED_NORMAL) != (lp.sched_priority == 0))
2036                 goto out_unlock;
2037 
2038         retval = -EPERM;
2039         if ((policy == SCHED_FIFO || policy == SCHED_RR) &&
2040             !capable(CAP_SYS_NICE))
2041                 goto out_unlock;
2042         if ((current->euid != p->euid) && (current->euid != p->uid) &&
2043             !capable(CAP_SYS_NICE))
2044                 goto out_unlock;
2045 
2046         retval = security_task_setscheduler(p, policy, &lp);
2047         if (retval)
2048                 goto out_unlock;
2049 
2050         array = p->array;
2051         if (array)
2052                 deactivate_task(p, task_rq(p));
2053         retval = 0;
2054         p->policy = policy;
2055         p->rt_priority = lp.sched_priority;
2056         oldprio = p->prio;
2057         if (policy != SCHED_NORMAL)
2058                 p->prio = MAX_USER_RT_PRIO-1 - p->rt_priority;
2059         else
2060                 p->prio = p->static_prio;
2061         if (array) {
2062                 __activate_task(p, task_rq(p));
2063                 /*
2064                  * Reschedule if we are currently running on this runqueue and
2065                  * our priority decreased, or if we are not currently running on
2066                  * this runqueue and our priority is higher than the current's
2067                  */
2068                 if (rq->curr == p) {
2069                         if (p->prio > oldprio)
2070                                 resched_task(rq->curr);
2071                 } else if (p->prio < rq->curr->prio)
2072                         resched_task(rq->curr);
2073         }
2074 
2075 out_unlock:
2076         task_rq_unlock(rq, &flags);
2077 out_unlock_tasklist:
2078         read_unlock_irq(&tasklist_lock);
2079 
2080 out_nounlock:
2081         return retval;
2082 }
2083 
2084 /**
2085  * sys_sched_setscheduler - set/change the scheduler policy and RT priority
2086  * @pid: the pid in question.
2087  * @policy: new policy
2088  * @param: structure containing the new RT priority.
2089  */
2090 asmlinkage long sys_sched_setscheduler(pid_t pid, int policy,
2091                                       struct sched_param __user *param)
2092 {
2093         return setscheduler(pid, policy, param);
2094 }
2095 
2096 /**
2097  * sys_sched_setparam - set/change the RT priority of a thread
2098  * @pid: the pid in question.
2099  * @param: structure containing the new RT priority.
2100  */
2101 asmlinkage long sys_sched_setparam(pid_t pid, struct sched_param __user *param)
2102 {
2103         return setscheduler(pid, -1, param);
2104 }
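
/*
 * Example usage (illustrative sketch, not part of the original file): a
 * kernel thread can make itself real-time the same way migration_thread()
 * below does - build a struct sched_param on its kernel stack, switch to
 * KERNEL_DS so the copy_from_user() in setscheduler() accepts a kernel
 * pointer, and request SCHED_FIFO at the priority it needs.
 */
static int example_make_rt(int prio)
{
        struct sched_param __user param = { .sched_priority = prio };
        mm_segment_t old_fs = get_fs();
        int ret;

        set_fs(KERNEL_DS);      /* 'param' lives in kernel space */
        ret = setscheduler(0 /* current */, SCHED_FIFO, &param);
        set_fs(old_fs);
        return ret;
}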
2105 
2106 /**
2107  * sys_sched_getscheduler - get the policy (scheduling class) of a thread
2108  * @pid: the pid in question.
2109  */
2110 asmlinkage long sys_sched_getscheduler(pid_t pid)
2111 {
2112         int retval = -EINVAL;
2113         task_t *p;
2114 
2115         if (pid < 0)
2116                 goto out_nounlock;
2117 
2118         retval = -ESRCH;
2119         read_lock(&tasklist_lock);
2120         p = find_process_by_pid(pid);
2121         if (p) {
2122                 retval = security_task_getscheduler(p);
2123                 if (!retval)
2124                         retval = p->policy;
2125         }
2126         read_unlock(&tasklist_lock);
2127 
2128 out_nounlock:
2129         return retval;
2130 }
2131 
2132 /**
2133  * sys_sched_getparam - get the RT priority of a thread
2134  * @pid: the pid in question.
2135  * @param: structure containing the RT priority.
2136  */
2137 asmlinkage long sys_sched_getparam(pid_t pid, struct sched_param __user *param)
2138 {
2139         struct sched_param lp;
2140         int retval = -EINVAL;
2141         task_t *p;
2142 
2143         if (!param || pid < 0)
2144                 goto out_nounlock;
2145 
2146         read_lock(&tasklist_lock);
2147         p = find_process_by_pid(pid);
2148         retval = -ESRCH;
2149         if (!p)
2150                 goto out_unlock;
2151 
2152         retval = security_task_getscheduler(p);
2153         if (retval)
2154                 goto out_unlock;
2155 
2156         lp.sched_priority = p->rt_priority;
2157         read_unlock(&tasklist_lock);
2158 
2159         /*
2160          * This one might sleep, we cannot do it with a spinlock held ...
2161          */
2162         retval = copy_to_user(param, &lp, sizeof(*param)) ? -EFAULT : 0;
2163 
2164 out_nounlock:
2165         return retval;
2166 
2167 out_unlock:
2168         read_unlock(&tasklist_lock);
2169         return retval;
2170 }
2171 
2172 /**
2173  * sys_sched_setaffinity - set the cpu affinity of a process
2174  * @pid: pid of the process
2175  * @len: length in bytes of the bitmask pointed to by user_mask_ptr
2176  * @user_mask_ptr: user-space pointer to the new cpu mask
2177  */
2178 asmlinkage long sys_sched_setaffinity(pid_t pid, unsigned int len,
2179                                       unsigned long __user *user_mask_ptr)
2180 {
2181         cpumask_t new_mask;
2182         int retval;
2183         task_t *p;
2184 
2185         if (len < sizeof(new_mask))
2186                 return -EINVAL;
2187 
2188         if (copy_from_user(&new_mask, user_mask_ptr, sizeof(new_mask)))
2189                 return -EFAULT;
2190 
2191         read_lock(&tasklist_lock);
2192 
2193         p = find_process_by_pid(pid);
2194         if (!p) {
2195                 read_unlock(&tasklist_lock);
2196                 return -ESRCH;
2197         }
2198 
2199         /*
2200          * It is not safe to call set_cpus_allowed with the
2201          * tasklist_lock held.  We will bump the task_struct's
2202          * usage count and then drop tasklist_lock.
2203          */
2204         get_task_struct(p);
2205         read_unlock(&tasklist_lock);
2206 
2207         retval = -EPERM;
2208         if ((current->euid != p->euid) && (current->euid != p->uid) &&
2209                         !capable(CAP_SYS_NICE))
2210                 goto out_unlock;
2211 
2212         retval = set_cpus_allowed(p, new_mask);
2213 
2214 out_unlock:
2215         put_task_struct(p);
2216         return retval;
2217 }
2218 
2219 /**
2220  * sys_sched_getaffinity - get the cpu affinity of a process
2221  * @pid: pid of the process
2222  * @len: length in bytes of the bitmask pointed to by user_mask_ptr
2223  * @user_mask_ptr: user-space pointer to hold the current cpu mask
2224  */
2225 asmlinkage long sys_sched_getaffinity(pid_t pid, unsigned int len,
2226                                       unsigned long __user *user_mask_ptr)
2227 {
2228         unsigned int real_len;
2229         cpumask_t mask;
2230         int retval;
2231         task_t *p;
2232 
2233         real_len = sizeof(mask);
2234         if (len < real_len)
2235                 return -EINVAL;
2236 
2237         read_lock(&tasklist_lock);
2238 
2239         retval = -ESRCH;
2240         p = find_process_by_pid(pid);
2241         if (!p)
2242                 goto out_unlock;
2243 
2244         retval = 0;
2245         cpus_and(mask, p->cpus_allowed, cpu_online_map);
2246 
2247 out_unlock:
2248         read_unlock(&tasklist_lock);
2249         if (retval)
2250                 return retval;
2251         if (copy_to_user(user_mask_ptr, &mask, real_len))
2252                 return -EFAULT;
2253         return real_len;
2254 }
2255 
2256 /**
2257  * sys_sched_yield - yield the current processor to other threads.
2258  *
2259  * this function yields the current CPU by moving the calling thread
2260  * to the expired array. If there are no other threads running on this
2261  * CPU then this function will return.
2262  */
2263 asmlinkage long sys_sched_yield(void)
2264 {
2265         runqueue_t *rq = this_rq_lock();
2266         prio_array_t *array = current->array;
2267 
2268         /*
2269          * We implement yielding by moving the task into the expired
2270          * queue.
2271          *
2272          * (special rule: RT tasks will just roundrobin in the active
2273          *  array.)
2274          */
2275         if (likely(!rt_task(current))) {
2276                 dequeue_task(current, array);
2277                 enqueue_task(current, rq->expired);
2278         } else {
2279                 list_del(&current->run_list);
2280                 list_add_tail(&current->run_list, array->queue + current->prio);
2281         }
2282         /*
2283          * Since we are going to call schedule() anyway, there's
2284          * no need to preempt:
2285          */
2286         _raw_spin_unlock(&rq->lock);
2287         preempt_enable_no_resched();
2288 
2289         schedule();
2290 
2291         return 0;
2292 }
2293 
2294 void __cond_resched(void)
2295 {
2296         set_current_state(TASK_RUNNING);
2297         schedule();
2298 }
2299 
2300 EXPORT_SYMBOL(__cond_resched);
2301 
2302 /**
2303  * yield - yield the current processor to other threads.
2304  *
2305  * this is a shortcut for kernel-space yielding - it marks the
2306  * thread runnable and calls sys_sched_yield().
2307  */
2308 void yield(void)
2309 {
2310         set_current_state(TASK_RUNNING);
2311         sys_sched_yield();
2312 }
2313 
2314 EXPORT_SYMBOL(yield);
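
/*
 * Example usage (illustrative sketch, not part of the original file):
 * yield() suits coarse polling loops where the caller cannot make progress
 * until some other thread does - migration_call() below spins this way
 * while waiting for the new CPU's migration thread to register itself.
 * 'example_ready' is a hypothetical flag.
 */
static void example_poll_until_ready(volatile int *example_ready)
{
        while (!*example_ready)
                yield();        /* let anyone else who is runnable go first */
}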
2315 
2316 /*
2317  * This task is about to go to sleep on IO.  Increment rq->nr_iowait so
2318  * that process accounting knows that this is a task in IO wait state.
2319  *
2320  * But don't do that if it is a deliberate, throttling IO wait (this task
2321  * has set its backing_dev_info: the queue against which it should throttle)
2322  */
2323 void io_schedule(void)
2324 {
2325         struct runqueue *rq = this_rq();
2326 
2327         atomic_inc(&rq->nr_iowait);
2328         schedule();
2329         atomic_dec(&rq->nr_iowait);
2330 }
2331 
2332 EXPORT_SYMBOL(io_schedule);
2333 
2334 long io_schedule_timeout(long timeout)
2335 {
2336         struct runqueue *rq = this_rq();
2337         long ret;
2338 
2339         atomic_inc(&rq->nr_iowait);
2340         ret = schedule_timeout(timeout);
2341         atomic_dec(&rq->nr_iowait);
2342         return ret;
2343 }
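
/*
 * Example usage (illustrative sketch, not part of the original file): code
 * that sleeps for disk I/O uses io_schedule()/io_schedule_timeout() instead
 * of plain schedule()/schedule_timeout() so the time is accounted as iowait.
 * This simple polling form assumes a hypothetical 'example_io_done' flag set
 * from I/O completion context; real callers normally combine io_schedule()
 * with a wait queue instead of polling.
 */
static void example_wait_for_io(volatile int *example_io_done)
{
        while (!*example_io_done) {
                set_current_state(TASK_UNINTERRUPTIBLE);
                io_schedule_timeout(HZ / 10);   /* shows up as iowait */
        }
}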
2344 
2345 /**
2346  * sys_sched_get_priority_max - return maximum RT priority.
2347  * @policy: scheduling class.
2348  *
2349  * this syscall returns the maximum rt_priority that can be used
2350  * by a given scheduling class.
2351  */
2352 asmlinkage long sys_sched_get_priority_max(int policy)
2353 {
2354         int ret = -EINVAL;
2355 
2356         switch (policy) {
2357         case SCHED_FIFO:
2358         case SCHED_RR:
2359                 ret = MAX_USER_RT_PRIO-1;
2360                 break;
2361         case SCHED_NORMAL:
2362                 ret = 0;
2363                 break;
2364         }
2365         return ret;
2366 }
2367 
2368 /**
2369  * sys_sched_get_priority_min - return minimum RT priority.
2370  * @policy: scheduling class.
2371  *
2372  * this syscall returns the minimum rt_priority that can be used
2373  * by a given scheduling class.
2374  */
2375 asmlinkage long sys_sched_get_priority_min(int policy)
2376 {
2377         int ret = -EINVAL;
2378 
2379         switch (policy) {
2380         case SCHED_FIFO:
2381         case SCHED_RR:
2382                 ret = 1;
2383                 break;
2384         case SCHED_NORMAL:
2385                 ret = 0;
2386         }
2387         return ret;
2388 }
2389 
2390 /**
2391  * sys_sched_rr_get_interval - return the default timeslice of a process.
2392  * @pid: pid of the process.
2393  * @interval: userspace pointer to the timeslice value.
2394  *
2395  * this syscall writes the default timeslice value of a given process
2396  * into the user-space timespec buffer. A value of '0' means infinity.
2397  */
2398 asmlinkage long sys_sched_rr_get_interval(pid_t pid, struct timespec __user *interval)
2399 {
2400         int retval = -EINVAL;
2401         struct timespec t;
2402         task_t *p;
2403 
2404         if (pid < 0)
2405                 goto out_nounlock;
2406 
2407         retval = -ESRCH;
2408         read_lock(&tasklist_lock);
2409         p = find_process_by_pid(pid);
2410         if (!p)
2411                 goto out_unlock;
2412 
2413         retval = security_task_getscheduler(p);
2414         if (retval)
2415                 goto out_unlock;
2416 
2417         jiffies_to_timespec(p->policy & SCHED_FIFO ?
2418                                 0 : task_timeslice(p), &t);
2419         read_unlock(&tasklist_lock);
2420         retval = copy_to_user(interval, &t, sizeof(t)) ? -EFAULT : 0;
2421 out_nounlock:
2422         return retval;
2423 out_unlock:
2424         read_unlock(&tasklist_lock);
2425         return retval;
2426 }
2427 
2428 static inline struct task_struct *eldest_child(struct task_struct *p)
2429 {
2430         if (list_empty(&p->children)) return NULL;
2431         return list_entry(p->children.next,struct task_struct,sibling);
2432 }
2433 
2434 static inline struct task_struct *older_sibling(struct task_struct *p)
2435 {
2436         if (p->sibling.prev==&p->parent->children) return NULL;
2437         return list_entry(p->sibling.prev,struct task_struct,sibling);
2438 }
2439 
2440 static inline struct task_struct *younger_sibling(struct task_struct *p)
2441 {
2442         if (p->sibling.next==&p->parent->children) return NULL;
2443         return list_entry(p->sibling.next,struct task_struct,sibling);
2444 }
2445 
2446 static void show_task(task_t * p)
2447 {
2448         unsigned long free = 0;
2449         task_t *relative;
2450         int state;
2451         static const char * stat_nam[] = { "R", "S", "D", "T", "Z", "W" };
2452 
2453         printk("%-13.13s ", p->comm);
2454         state = p->state ? __ffs(p->state) + 1 : 0;
2455         if (((unsigned) state) < sizeof(stat_nam)/sizeof(char *))
2456                 printk(stat_nam[state]);
2457         else
2458                 printk(" ");
2459 #if (BITS_PER_LONG == 32)
2460         if (p == current)
2461                 printk(" current  ");
2462         else
2463                 printk(" %08lX ", thread_saved_pc(p));
2464 #else
2465         if (p == current)
2466                 printk("   current task   ");
2467         else
2468                 printk(" %016lx ", thread_saved_pc(p));
2469 #endif
2470         {
2471                 unsigned long * n = (unsigned long *) (p->thread_info+1);
2472                 while (!*n)
2473                         n++;
2474                 free = (unsigned long) n - (unsigned long)(p->thread_info+1);
2475         }
2476         printk("%5lu %5d %6d ", free, p->pid, p->parent->pid);
2477         if ((relative = eldest_child(p)))
2478                 printk("%5d ", relative->pid);
2479         else
2480                 printk("      ");
2481         if ((relative = younger_sibling(p)))
2482                 printk("%7d", relative->pid);
2483         else
2484                 printk("       ");
2485         if ((relative = older_sibling(p)))
2486                 printk(" %5d", relative->pid);
2487         else
2488                 printk("      ");
2489         if (!p->mm)
2490                 printk(" (L-TLB)\n");
2491         else
2492                 printk(" (NOTLB)\n");
2493 
2494         show_stack(p, NULL);
2495 }
2496 
2497 void show_state(void)
2498 {
2499         task_t *g, *p;
2500 
2501 #if (BITS_PER_LONG == 32)
2502         printk("\n"
2503                "                         free                        sibling\n");
2504         printk("  task             PC    stack   pid father child younger older\n");
2505 #else
2506         printk("\n"
2507                "                                 free                        sibling\n");
2508         printk("  task                 PC        stack   pid father child younger older\n");
2509 #endif
2510         read_lock(&tasklist_lock);
2511         do_each_thread(g, p) {
2512                 /*
2513                  * reset the NMI-timeout, listing all tasks on a slow
2514                  * console might take a lot of time:
2515                  */
2516                 touch_nmi_watchdog();
2517                 show_task(p);
2518         } while_each_thread(g, p);
2519 
2520         read_unlock(&tasklist_lock);
2521 }
2522 
2523 void __init init_idle(task_t *idle, int cpu)
2524 {
2525         runqueue_t *idle_rq = cpu_rq(cpu), *rq = cpu_rq(task_cpu(idle));
2526         unsigned long flags;
2527 
2528         local_irq_save(flags);
2529         double_rq_lock(idle_rq, rq);
2530 
2531         idle_rq->curr = idle_rq->idle = idle;
2532         deactivate_task(idle, rq);
2533         idle->array = NULL;
2534         idle->prio = MAX_PRIO;
2535         idle->state = TASK_RUNNING;
2536         set_task_cpu(idle, cpu);
2537         double_rq_unlock(idle_rq, rq);
2538         set_tsk_need_resched(idle);
2539         local_irq_restore(flags);
2540 
2541         /* Set the preempt count _outside_ the spinlocks! */
2542 #ifdef CONFIG_PREEMPT
2543         idle->thread_info->preempt_count = (idle->lock_depth >= 0);
2544 #else
2545         idle->thread_info->preempt_count = 0;
2546 #endif
2547 }
2548 
2549 #ifdef CONFIG_SMP
2550 /*
2551  * This is how migration works:
2552  *
2553  * 1) we queue a migration_req_t structure in the source CPU's
2554  *    runqueue and wake up that CPU's migration thread.
2555  * 2) we wait on the request's completion => thread blocks.
2556  * 3) migration thread wakes up (implicitly it forces the migrated
2557  *    thread off the CPU)
2558  * 4) it gets the migration request and checks whether the migrated
2559  *    task is still in the wrong runqueue.
2560  * 5) if it's in the wrong runqueue then the migration thread removes
2561  *    it and puts it into the right queue.
2562  * 6) migration thread signals the request's completion.
2563  * 7) we wake up and the migration is done.
2564  */
2565 
2566 typedef struct {
2567         struct list_head list;
2568         task_t *task;
2569         struct completion done;
2570 } migration_req_t;
2571 
2572 /*
2573  * Change a given task's CPU affinity. Migrate the thread to a
2574  * proper CPU and schedule it away if the CPU it's executing on
2575  * is removed from the allowed bitmask.
2576  *
2577  * NOTE: the caller must have a valid reference to the task, the
2578  * task must not exit() & deallocate itself prematurely.  The
2579  * call is not atomic; no spinlocks may be held.
2580  */
2581 int set_cpus_allowed(task_t *p, cpumask_t new_mask)
2582 {
2583         unsigned long flags;
2584         migration_req_t req;
2585         runqueue_t *rq;
2586 
2587         if (any_online_cpu(new_mask) == NR_CPUS)
2588                 return -EINVAL;
2589 
2590         rq = task_rq_lock(p, &flags);
2591         p->cpus_allowed = new_mask;
2592         /*
2593          * Can the task run on the task's current CPU? If not then
2594          * migrate the thread off to a proper CPU.
2595          */
2596         if (cpu_isset(task_cpu(p), new_mask)) {
2597                 task_rq_unlock(rq, &flags);
2598                 return 0;
2599         }
2600         /*
2601          * If the task is not on a runqueue (and not running), then
2602          * it is sufficient to simply update the task's cpu field.
2603          */
2604         if (!p->array && !task_running(rq, p)) {
2605                 set_task_cpu(p, any_online_cpu(p->cpus_allowed));
2606                 task_rq_unlock(rq, &flags);
2607                 return 0;
2608         }
2609         init_completion(&req.done);
2610         req.task = p;
2611         list_add(&req.list, &rq->migration_queue);
2612         task_rq_unlock(rq, &flags);
2613 
2614         wake_up_process(rq->migration_thread);
2615 
2616         wait_for_completion(&req.done);
2617         return 0;
2618 }
2619 
2620 EXPORT_SYMBOL_GPL(set_cpus_allowed);
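
/*
 * Example usage (illustrative sketch, not part of the original file): a
 * common pattern is a kernel thread binding itself to a single CPU.
 * 'example_cpu' is a hypothetical value.
 */
static void example_bind_to_cpu(int example_cpu)
{
        set_cpus_allowed(current, cpumask_of_cpu(example_cpu));
        /* from here on this thread only runs on example_cpu */
}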
2621 
2622 /* Move (not current) task off this cpu, onto dest cpu. */
2623 static void move_task_away(struct task_struct *p, int dest_cpu)
2624 {
2625         runqueue_t *rq_dest;
2626         unsigned long flags;
2627 
2628         rq_dest = cpu_rq(dest_cpu);
2629 
2630         local_irq_save(flags);
2631         double_rq_lock(this_rq(), rq_dest);
2632         if (task_cpu(p) != smp_processor_id())
2633                 goto out; /* Already moved */
2634 
2635         set_task_cpu(p, dest_cpu);
2636         if (p->array) {
2637                 deactivate_task(p, this_rq());
2638                 activate_task(p, rq_dest);
2639                 if (p->prio < rq_dest->curr->prio)
2640                         resched_task(rq_dest->curr);
2641         }
2642  out:
2643         double_rq_unlock(this_rq(), rq_dest);
2644         local_irq_restore(flags);
2645 }
2646 
2647 typedef struct {
2648         int cpu;
2649         struct completion startup_done;
2650         task_t *task;
2651 } migration_startup_t;
2652 
2653 /*
2654  * migration_thread - this is a highprio system thread that performs
2655  * thread migration by bumping the thread off its CPU and then 'pushing'
2656  * it onto another runqueue.
2657  */
2658 static int migration_thread(void * data)
2659 {
2660         /* Marking "param" __user is ok, since we do a set_fs(KERNEL_DS); */
2661         struct sched_param __user param = { .sched_priority = MAX_RT_PRIO-1 };
2662         migration_startup_t *startup = data;
2663         int cpu = startup->cpu;
2664         runqueue_t *rq;
2665         int ret;
2666 
2667         startup->task = current;
2668         complete(&startup->startup_done);
2669         set_current_state(TASK_UNINTERRUPTIBLE);
2670         schedule();
2671 
2672         BUG_ON(smp_processor_id() != cpu);
2673 
2674         daemonize("migration/%d", cpu);
2675         set_fs(KERNEL_DS);
2676 
2677         ret = setscheduler(0, SCHED_FIFO, &param);
2678 
2679         rq = this_rq();
2680         rq->migration_thread = current;
2681 
2682         for (;;) {
2683                 struct list_head *head;
2684                 migration_req_t *req;
2685 
2686                 if (current->flags & PF_FREEZE)
2687                         refrigerator(PF_IOTHREAD);
2688 
2689                 spin_lock_irq(&rq->lock);
2690                 head = &rq->migration_queue;
2691                 current->state = TASK_INTERRUPTIBLE;
2692                 if (list_empty(head)) {
2693                         spin_unlock_irq(&rq->lock);
2694                         schedule();
2695                         continue;
2696                 }
2697                 req = list_entry(head->next, migration_req_t, list);
2698                 list_del_init(head->next);
2699                 spin_unlock_irq(&rq->lock);
2700 
2701                 move_task_away(req->task,
2702                                any_online_cpu(req->task->cpus_allowed));
2703                 complete(&req->done);
2704         }
2705 }
2706 
2707 /*
2708  * migration_call - callback that gets triggered when a CPU is added.
2709  * Here we can start up the necessary migration thread for the new CPU.
2710  */
2711 static int migration_call(struct notifier_block *nfb,
2712                           unsigned long action,
2713                           void *hcpu)
2714 {
2715         long cpu = (long) hcpu;
2716         migration_startup_t startup;
2717 
2718         switch (action) {
2719         case CPU_ONLINE:
2720 
2721                 printk("Starting migration thread for cpu %li\n", cpu);
2722 
2723                 startup.cpu = cpu;
2724                 startup.task = NULL;
2725                 init_completion(&startup.startup_done);
2726 
2727                 kernel_thread(migration_thread, &startup, CLONE_KERNEL);
2728                 wait_for_completion(&startup.startup_done);
2729                 wait_task_inactive(startup.task);
2730 
2731                 startup.task->thread_info->cpu = cpu;
2732                 startup.task->cpus_allowed = cpumask_of_cpu(cpu);
2733 
2734                 wake_up_process(startup.task);
2735 
2736                 while (!cpu_rq(cpu)->migration_thread)
2737                         yield();
2738 
2739                 break;
2740         }
2741         return NOTIFY_OK;
2742 }
2743 
2744 static struct notifier_block migration_notifier = { &migration_call, NULL, 0 };
2745 
2746 __init int migration_init(void)
2747 {
2748         /* Start one for boot CPU. */
2749         migration_call(&migration_notifier, CPU_ONLINE,
2750                        (void *)(long)smp_processor_id());
2751         register_cpu_notifier(&migration_notifier);
2752         return 0;
2753 }
2754 
2755 #endif
2756 
2757 #if defined(CONFIG_SMP) || defined(CONFIG_PREEMPT)
2758 /*
2759  * The 'big kernel lock'
2760  *
2761  * This spinlock is taken and released recursively by lock_kernel()
2762  * and unlock_kernel().  It is transparently dropped and reacquired
2763  * over schedule().  It is used to protect legacy code that hasn't
2764  * been migrated to a proper locking design yet.
2765  *
2766  * Don't use in new code.
2767  */
2768 spinlock_t kernel_flag __cacheline_aligned_in_smp = SPIN_LOCK_UNLOCKED;
2769 
2770 EXPORT_SYMBOL(kernel_flag);
2771 #endif
2772 
2773 static void kstat_init_cpu(int cpu)
2774 {
2775         /* Add any initialisation to kstat here */
2776         /* Useful when cpu offlining logic is added.. */
2777 }
2778 
2779 static int __devinit kstat_cpu_notify(struct notifier_block *self,
2780                                         unsigned long action, void *hcpu)
2781 {
2782         int cpu = (unsigned long)hcpu;
2783         switch(action) {
2784         case CPU_UP_PREPARE:
2785                 kstat_init_cpu(cpu);
2786                 break;
2787         default:
2788                 break;
2789         }
2790         return NOTIFY_OK;
2791 }
2792 
2793 static struct notifier_block __devinitdata kstat_nb = {
2794         .notifier_call  = kstat_cpu_notify,
2795         .next           = NULL,
2796 };
2797 
2798 __init static void init_kstat(void) {
2799         kstat_cpu_notify(&kstat_nb, (unsigned long)CPU_UP_PREPARE,
2800                         (void *)(long)smp_processor_id());
2801         register_cpu_notifier(&kstat_nb);
2802 }
2803 
2804 void __init sched_init(void)
2805 {
2806         runqueue_t *rq;
2807         int i, j, k;
2808 
2809         /* Init the kstat counters */
2810         init_kstat();
2811         for (i = 0; i < NR_CPUS; i++) {
2812                 prio_array_t *array;
2813 
2814                 rq = cpu_rq(i);
2815                 rq->active = rq->arrays;
2816                 rq->expired = rq->arrays + 1;
2817                 spin_lock_init(&rq->lock);
2818                 INIT_LIST_HEAD(&rq->migration_queue);
2819                 atomic_set(&rq->nr_iowait, 0);
2820                 nr_running_init(rq);
2821 
2822                 for (j = 0; j < 2; j++) {
2823                         array = rq->arrays + j;
2824                         for (k = 0; k < MAX_PRIO; k++) {
2825                                 INIT_LIST_HEAD(array->queue + k);
2826                                 __clear_bit(k, array->bitmap);
2827                         }
2828                         /* delimiter for bitsearch */
2829                         __set_bit(MAX_PRIO, array->bitmap);
2830                 }
2831         }
2832         /*
2833          * We have to do a little magic to get the first
2834          * thread right in SMP mode.
2835          */
2836         rq = this_rq();
2837         rq->curr = current;
2838         rq->idle = current;
2839         set_task_cpu(current, smp_processor_id());
2840         wake_up_forked_process(current);
2841 
2842         init_timers();
2843 
2844         /*
2845          * The boot idle thread does lazy MMU switching as well:
2846          */
2847         atomic_inc(&init_mm.mm_count);
2848         enter_lazy_tlb(&init_mm, current);
2849 }
2850 
2851 #ifdef CONFIG_DEBUG_SPINLOCK_SLEEP
2852 void __might_sleep(char *file, int line)
2853 {
2854 #if defined(in_atomic)
2855         static unsigned long prev_jiffy;        /* ratelimiting */
2856 
2857         if ((in_atomic() || irqs_disabled()) && system_running) {
2858                 if (time_before(jiffies, prev_jiffy + HZ) && prev_jiffy)
2859                         return;
2860                 prev_jiffy = jiffies;
2861                 printk(KERN_ERR "Debug: sleeping function called from invalid"
2862                                 " context at %s:%d\n", file, line);
2863                 printk("in_atomic():%d, irqs_disabled():%d\n",
2864                                 in_atomic(), irqs_disabled());
2865                 dump_stack();
2866         }
2867 #endif
2868 }
2869 EXPORT_SYMBOL(__might_sleep);
2870 #endif
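
/*
 * Example usage (illustrative sketch, not part of the original file):
 * functions that may block annotate themselves with might_sleep(), so that
 * on CONFIG_DEBUG_SPINLOCK_SLEEP kernels a call from atomic context is
 * reported by __might_sleep() above even if it happens not to block.
 */
static void example_helper_that_can_sleep(void)
{
        might_sleep();  /* complain if we are called under a spinlock */
        /* ... code that may end up calling schedule() ... */
}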
2871 
2872 
2873 #if defined(CONFIG_SMP) && defined(CONFIG_PREEMPT)
2874 /*
2875  * This could be a long-held lock.  If another CPU holds it for a long time,
2876  * and that CPU is not asked to reschedule then *this* CPU will spin on the
2877  * lock for a long time, even if *this* CPU is asked to reschedule.
2878  *
2879  * So what we do here, in the slow (contended) path is to spin on the lock by
2880  * hand while permitting preemption.
2881  *
2882  * Called inside preempt_disable().
2883  */
2884 void __preempt_spin_lock(spinlock_t *lock)
2885 {
2886         if (preempt_count() > 1) {
2887                 _raw_spin_lock(lock);
2888                 return;
2889         }
2890         do {
2891                 preempt_enable();
2892                 while (spin_is_locked(lock))
2893                         cpu_relax();
2894                 preempt_disable();
2895         } while (!_raw_spin_trylock(lock));
2896 }
2897 
2898 EXPORT_SYMBOL(__preempt_spin_lock);
2899 
2900 void __preempt_write_lock(rwlock_t *lock)
2901 {
2902         if (preempt_count() > 1) {
2903                 _raw_write_lock(lock);
2904                 return;
2905         }
2906 
2907         do {
2908                 preempt_enable();
2909                 while (rwlock_is_locked(lock))
2910                         cpu_relax();
2911                 preempt_disable();
2912         } while (!_raw_write_trylock(lock));
2913 }
2914 
2915 EXPORT_SYMBOL(__preempt_write_lock);
2916 #endif /* defined(CONFIG_SMP) && defined(CONFIG_PREEMPT) */
2917 
