Linux kernel mirror (for testing): git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git

Merge branch 'for-3.13-fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/tj/wq

Pull workqueue fixes from Tejun Heo:
"This contains one important fix. The NUMA support added a while back
broke ordering guarantees on ordered workqueues. It was enforced by
having single frontend interface with @max_active == 1 but the NUMA
support puts multiple interfaces on unbound workqueues on NUMA
machines thus breaking the ordered guarantee. This is fixed by
disabling NUMA support on ordered workqueues.

The above and a couple of other patches were sitting in for-3.12-fixes,
but I forgot to push that out, so they ended up waiting a bit too
long. My apologies.

Other fixes are minor"
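
For context, the semantics being restored are the ones
alloc_ordered_workqueue() advertises: at most one work item executes at
a time, in queueing order, which only holds while the workqueue is
backed by a single pool_workqueue. A minimal, hypothetical module
sketch of that guarantee (the names ordered_demo, first_fn and
second_fn are illustrative, not from this tree):

#include <linux/module.h>
#include <linux/workqueue.h>

static struct workqueue_struct *ordered_wq;

static void first_fn(struct work_struct *work)
{
	pr_info("first: must run before second\n");
}

static void second_fn(struct work_struct *work)
{
	pr_info("second: must run after first\n");
}

static DECLARE_WORK(first_work, first_fn);
static DECLARE_WORK(second_work, second_fn);

static int __init ordered_demo_init(void)
{
	/* ordered wq: a single pwq with @max_active == 1 under the hood */
	ordered_wq = alloc_ordered_workqueue("ordered_demo", 0);
	if (!ordered_wq)
		return -ENOMEM;

	/*
	 * FIFO execution is guaranteed only while both items land on the
	 * same pool_workqueue.  The per-node pwqs added by the NUMA
	 * support let these two run concurrently or out of order, which
	 * is the bug fixed below by disabling NUMA on ordered workqueues.
	 */
	queue_work(ordered_wq, &first_work);
	queue_work(ordered_wq, &second_work);
	return 0;
}

static void __exit ordered_demo_exit(void)
{
	destroy_workqueue(ordered_wq);
}

module_init(ordered_demo_init);
module_exit(ordered_demo_exit);
MODULE_LICENSE("GPL");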

* 'for-3.13-fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/tj/wq:
workqueue: fix pool ID allocation leakage and remove BUILD_BUG_ON() in init_workqueues
workqueue: fix comment typo for __queue_work()
workqueue: fix ordered workqueues in NUMA setups
workqueue: swap set_cpus_allowed_ptr() and PF_NO_SETAFFINITY

+37 -13
kernel/workqueue.c
···
 /* I: attributes used when instantiating standard unbound pools on demand */
 static struct workqueue_attrs *unbound_std_wq_attrs[NR_STD_WORKER_POOLS];

+/* I: attributes used when instantiating ordered pools on demand */
+static struct workqueue_attrs *ordered_wq_attrs[NR_STD_WORKER_POOLS];
+
 struct workqueue_struct *system_wq __read_mostly;
 EXPORT_SYMBOL(system_wq);
 struct workqueue_struct *system_highpri_wq __read_mostly;
···
 static inline void debug_work_deactivate(struct work_struct *work) { }
 #endif

-/* allocate ID and assign it to @pool */
+/**
+ * worker_pool_assign_id - allocate ID and assign it to @pool
+ * @pool: the pool pointer of interest
+ *
+ * Returns 0 if ID in [0, WORK_OFFQ_POOL_NONE) is allocated and assigned
+ * successfully, -errno on failure.
+ */
 static int worker_pool_assign_id(struct worker_pool *pool)
 {
 	int ret;

 	lockdep_assert_held(&wq_pool_mutex);

-	ret = idr_alloc(&worker_pool_idr, pool, 0, 0, GFP_KERNEL);
+	ret = idr_alloc(&worker_pool_idr, pool, 0, WORK_OFFQ_POOL_NONE,
+			GFP_KERNEL);
 	if (ret >= 0) {
 		pool->id = ret;
 		return 0;
···
 	debug_work_activate(work);

-	/* if dying, only works from the same workqueue are allowed */
+	/* if draining, only works from the same workqueue are allowed */
 	if (unlikely(wq->flags & __WQ_DRAINING) &&
 	    WARN_ON_ONCE(!is_chained_work(wq)))
 		return;
···
 	if (IS_ERR(worker->task))
 		goto fail;

+	set_user_nice(worker->task, pool->attrs->nice);
+
+	/* prevent userland from meddling with cpumask of workqueue workers */
+	worker->task->flags |= PF_NO_SETAFFINITY;
+
 	/*
 	 * set_cpus_allowed_ptr() will fail if the cpumask doesn't have any
 	 * online CPUs.  It'll be re-applied when any of the CPUs come up.
 	 */
-	set_user_nice(worker->task, pool->attrs->nice);
 	set_cpus_allowed_ptr(worker->task, pool->attrs->cpumask);
-
-	/* prevent userland from meddling with cpumask of workqueue workers */
-	worker->task->flags |= PF_NO_SETAFFINITY;

 	/*
 	 * The caller is responsible for ensuring %POOL_DISASSOCIATED
···
 static int alloc_and_link_pwqs(struct workqueue_struct *wq)
 {
 	bool highpri = wq->flags & WQ_HIGHPRI;
-	int cpu;
+	int cpu, ret;

 	if (!(wq->flags & WQ_UNBOUND)) {
 		wq->cpu_pwqs = alloc_percpu(struct pool_workqueue);
···
 			mutex_unlock(&wq->mutex);
 		}
 		return 0;
+	} else if (wq->flags & __WQ_ORDERED) {
+		ret = apply_workqueue_attrs(wq, ordered_wq_attrs[highpri]);
+		/* there should only be single pwq for ordering guarantee */
+		WARN(!ret && (wq->pwqs.next != &wq->dfl_pwq->pwqs_node ||
+			      wq->pwqs.prev != &wq->dfl_pwq->pwqs_node),
+		     "ordering guarantee broken for workqueue %s\n", wq->name);
+		return ret;
 	} else {
 		return apply_workqueue_attrs(wq, unbound_std_wq_attrs[highpri]);
 	}
···
 	int std_nice[NR_STD_WORKER_POOLS] = { 0, HIGHPRI_NICE_LEVEL };
 	int i, cpu;

-	/* make sure we have enough bits for OFFQ pool ID */
-	BUILD_BUG_ON((1LU << (BITS_PER_LONG - WORK_OFFQ_POOL_SHIFT)) <
-		     WORK_CPU_END * NR_STD_WORKER_POOLS);
-
 	WARN_ON(__alignof__(struct pool_workqueue) < __alignof__(long long));

 	pwq_cache = KMEM_CACHE(pool_workqueue, SLAB_PANIC);
···
 		}
 	}

-	/* create default unbound wq attrs */
+	/* create default unbound and ordered wq attrs */
 	for (i = 0; i < NR_STD_WORKER_POOLS; i++) {
 		struct workqueue_attrs *attrs;

 		BUG_ON(!(attrs = alloc_workqueue_attrs(GFP_KERNEL)));
 		attrs->nice = std_nice[i];
 		unbound_std_wq_attrs[i] = attrs;
+
+		/*
+		 * An ordered wq should have only one pwq as ordering is
+		 * guaranteed by max_active which is enforced by pwqs.
+		 * Turn off NUMA so that dfl_pwq is used for all nodes.
+		 */
+		BUG_ON(!(attrs = alloc_workqueue_attrs(GFP_KERNEL)));
+		attrs->nice = std_nice[i];
+		attrs->no_numa = true;
+		ordered_wq_attrs[i] = attrs;
 	}

 	system_wq = alloc_workqueue("events", 0, 0);
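
The worker_pool_assign_id() hunk above also explains why the
BUILD_BUG_ON() in init_workqueues() could be removed: by passing
WORK_OFFQ_POOL_NONE as idr_alloc()'s exclusive @end, a pool ID that
would not fit in the OFFQ pool-ID bits can simply never be allocated,
turning a compile-time assertion into a runtime guarantee. A minimal
sketch of that bounded-allocation idiom, with hypothetical names
(example_idr and MAX_EXAMPLE_ID stand in for worker_pool_idr and
WORK_OFFQ_POOL_NONE):

#include <linux/idr.h>
#include <linux/gfp.h>

static DEFINE_IDR(example_idr);

/* hypothetical bound, playing the role of WORK_OFFQ_POOL_NONE */
#define MAX_EXAMPLE_ID	128

/* allocate an ID in [0, MAX_EXAMPLE_ID) and bind it to @object */
static int example_assign_id(void *object)
{
	/*
	 * @end is exclusive, and 0 would mean "no upper bound".  With a
	 * finite @end, idr_alloc() returns the new ID on success and
	 * -ENOSPC once the range is exhausted, so an out-of-range ID
	 * can never be handed out.
	 */
	return idr_alloc(&example_idr, object, 0, MAX_EXAMPLE_ID, GFP_KERNEL);
}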