diff --git a/kernel/sched/bfs.c b/kernel/sched/bfs.c
index e430d062d2df..85bb250eca52 100644
--- a/kernel/sched/bfs.c
+++ b/kernel/sched/bfs.c
@@ -2929,11 +2929,9 @@ static void attach_task(struct rq *rq, struct task_struct *p)
  */
 static void attach_one_task(struct rq *rq, struct task_struct *p)
 {
-	unsigned long flags;
-
-	raw_spin_lock_irqsave(&rq->lock, flags);
+	raw_spin_lock(&rq->lock);
 	attach_task(rq, p);
-	raw_spin_unlock_irqrestore(&rq->lock, flags);
+	raw_spin_unlock(&rq->lock);
 }
 
 static int active_load_balance_cpu_stop(void *data)
@@ -2942,8 +2940,11 @@ static int active_load_balance_cpu_stop(void *data)
 	unsigned long flags;
 	struct task_struct *p;
 	int target_cpu = rq->push_cpu;
+	bool migration = false;
 
-	raw_spin_lock_irqsave(&rq->lock, flags);
+	local_irq_save(flags);
+
+	raw_spin_lock(&rq->lock);
 
 	p = rq_first_queued_task(rq);
 	if (unlikely(NULL == p))
@@ -2954,16 +2955,32 @@
 		goto unlock_out;
 	}
 
-	detach_task(rq, p, target_cpu);
+	raw_spin_unlock(&rq->lock);
+	raw_spin_lock(&p->pi_lock);
+	raw_spin_lock(&rq->lock);
+
+	/*
+	 * _something_ may have changed the task, double check again
+	 */
+	if (likely(rq_first_queued_task(rq) == p &&
+		   rq == task_rq(p) &&
+		   cpumask_test_cpu(target_cpu, &p->cpus_allowed))) {
+		detach_task(rq, p, target_cpu);
+		migration = true;
+	}
 
 unlock_out:
 	rq->active_balance = 0;
-	raw_spin_unlock_irqrestore(&rq->lock, flags);
+	raw_spin_unlock(&rq->lock);
 
-	if (p) {
+	if (migration) {
 		struct rq *target_rq = cpu_rq(target_cpu);
 		attach_one_task(target_rq, p);
 	}
 
+	if (p)
+		raw_spin_unlock(&p->pi_lock);
+
+	local_irq_restore(flags);
 	return 0;
 }
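
Note on the locking pattern above: p->pi_lock orders before rq->lock, so the
stopper cannot take p->pi_lock while already holding rq->lock. The patch
therefore drops rq->lock, takes p->pi_lock, retakes rq->lock, and only then
re-validates the task, since another CPU may have dequeued or migrated it in
the window where no lock was held. Below is a minimal userspace sketch of the
same drop/lock/relock/re-validate sequence; the names (queue_lock, task_lock,
queue_head, claim_head_task) are hypothetical, and pthread mutexes stand in
for the kernel's raw spinlocks.

#include <pthread.h>
#include <stdio.h>

struct task {
	pthread_mutex_t task_lock;	/* stand-in for p->pi_lock */
	int id;
};

static pthread_mutex_t queue_lock = PTHREAD_MUTEX_INITIALIZER;	/* stand-in for rq->lock */
static struct task *queue_head;		/* stand-in for rq_first_queued_task() */

/*
 * Claim the head task for migration. Returns the task with task_lock
 * held, or NULL if there is nothing to claim or the task changed while
 * the queue was unlocked.
 */
static struct task *claim_head_task(void)
{
	struct task *p;

	pthread_mutex_lock(&queue_lock);
	p = queue_head;
	if (!p) {
		pthread_mutex_unlock(&queue_lock);
		return NULL;
	}

	/*
	 * Lock order is task_lock -> queue_lock, so task_lock cannot be
	 * taken while queue_lock is held: drop, lock, relock.
	 */
	pthread_mutex_unlock(&queue_lock);
	pthread_mutex_lock(&p->task_lock);
	pthread_mutex_lock(&queue_lock);

	/*
	 * Re-validate: another thread may have dequeued p in the window
	 * where no lock was held (the patch checks rq_first_queued_task(),
	 * task_rq() and cpus_allowed at this point).
	 */
	if (queue_head != p) {
		pthread_mutex_unlock(&queue_lock);
		pthread_mutex_unlock(&p->task_lock);
		return NULL;
	}

	queue_head = NULL;		/* "detach" the task */
	pthread_mutex_unlock(&queue_lock);
	return p;			/* task_lock still held by the caller */
}

int main(void)
{
	struct task t = { .task_lock = PTHREAD_MUTEX_INITIALIZER, .id = 1 };
	struct task *p;

	queue_head = &t;
	p = claim_head_task();
	if (p) {
		printf("claimed task %d\n", p->id);
		pthread_mutex_unlock(&p->task_lock);
	}
	return 0;
}

Build with cc -pthread. The sketch mirrors why the patch introduces the
migration flag: detach only happens if the re-check passes, while p->pi_lock
is released unconditionally whenever a task was found.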