141/* struct worker is defined in workqueue_internal.h */ 142 143structworker_pool { ... 173/* 174 * The current concurrency level. As it's likely to be accessed 175 * from other CPUs during try_to_wake_up(), put it in a separate 176 * cacheline. 177 */ 178atomic_t nr_running ____cacheline_aligned_in_smp;
813/** 814 * wq_worker_waking_up - a worker is waking up 815 * @task: task waking up 816 * @cpu: CPU @task is waking up to 817 * 818 * This function is called during try_to_wake_up() when a worker is 819 * being awoken. 820 * 821 * CONTEXT: 822 * spin_lock_irq(rq->lock) 823 */ 824voidwq_worker_waking_up(struct task_struct *task, int cpu) 825 { 826structworker *worker = kthread_data(task); 827 828if (!(worker->flags & WORKER_NOT_RUNNING)) { 829 WARN_ON_ONCE(worker->pool->cpu != cpu); 830atomic_inc(&worker->pool->nr_running); 831 } 832 }
/**
 * wq_worker_sleeping - a worker is going to sleep
 * @task: task going to sleep
 * @cpu: CPU in question, must be the current CPU number
 *
 * This function is called during schedule() when a busy worker is
 * going to sleep.  Worker on the same cpu can be woken up by
 * returning pointer to its task.
 *
 * CONTEXT:
 * spin_lock_irq(rq->lock)
 *
 * Return:
 * Worker task on @cpu to wake up, %NULL if none.
 */
struct task_struct *wq_worker_sleeping(struct task_struct *task, int cpu)
{
	struct worker *worker = kthread_data(task), *to_wakeup = NULL;
	struct worker_pool *pool;

	/*
	 * Rescuers, which may not have all the fields set up like normal
	 * workers, also reach here, let's not access anything before
	 * checking NOT_RUNNING.
	 */
	if (worker->flags & WORKER_NOT_RUNNING)
		return NULL;

	pool = worker->pool;

	/* this can only happen on the local cpu */
	if (WARN_ON_ONCE(cpu != raw_smp_processor_id()))
		return NULL;

	/*
	 * The counterpart of the following dec_and_test, implied mb,
	 * worklist not empty test sequence is in insert_work().
	 * Please read comment there.
	 *
	 * NOT_RUNNING is clear.  This means that we're bound to and
	 * running on the local cpu w/ rq lock held and preemption
	 * disabled, which in turn means that none else could be
	 * manipulating idle_list, so dereferencing idle_list without pool
	 * lock is safe.
	 */
	/*
	 * dec_and_test returning true means this was the pool's last
	 * running worker; if work is still pending, pick the first idle
	 * worker so our caller can wake it and keep the pool working.
	 */
	if (atomic_dec_and_test(&pool->nr_running) &&
	    !list_empty(&pool->worklist))
		to_wakeup = first_worker(pool);
	return to_wakeup ? to_wakeup->task : NULL;
}