From owner-svn-src-head@freebsd.org  Tue Oct 18 13:55:35 2016
From: Sean Bruno <sbruno@FreeBSD.org>
Date: Tue, 18 Oct 2016 13:55:34 +0000 (UTC)
To: src-committers@freebsd.org, svn-src-all@freebsd.org, svn-src-head@freebsd.org
Subject: svn commit: r307566 - head/sys/kern
Message-Id: <201610181355.u9IDtYNh009499@repo.freebsd.org>
List-Id: SVN commit messages for the src tree for head/-current

Author: sbruno
Date: Tue Oct 18 13:55:34 2016
New Revision: 307566
URL: https://svnweb.freebsd.org/changeset/base/307566

Log:
  Ensure that tasks with a specific cpu set prior to smp starting get
  re-attached to a thread running on that cpu.
  ref: https://github.com/NextBSD/NextBSD/commit/fcc20e306bc93ebbbe51f3775d1afb527970a2e9

  Submitted by:	mmacy@nextbsd.org

Modified:
  head/sys/kern/subr_gtaskqueue.c

Modified: head/sys/kern/subr_gtaskqueue.c
==============================================================================
--- head/sys/kern/subr_gtaskqueue.c	Tue Oct 18 13:39:55 2016	(r307565)
+++ head/sys/kern/subr_gtaskqueue.c	Tue Oct 18 13:55:34 2016	(r307566)
@@ -554,7 +554,7 @@ struct taskq_bind_task {
 };
 
 static void
-taskqgroup_cpu_create(struct taskqgroup *qgroup, int idx)
+taskqgroup_cpu_create(struct taskqgroup *qgroup, int idx, int cpu)
 {
 	struct taskqgroup_cpu *qcpu;
 
@@ -564,7 +564,7 @@ taskqgroup_cpu_create(struct taskqgroup 
 	    taskqueue_thread_enqueue, &qcpu->tgc_taskq);
 	gtaskqueue_start_threads(&qcpu->tgc_taskq, 1, PI_SOFT,
 	    "%s_%d", qgroup->tqg_name, idx);
-	qcpu->tgc_cpu = idx * qgroup->tqg_stride;
+	qcpu->tgc_cpu = cpu;
 }
 
 static void
@@ -633,8 +633,8 @@ taskqgroup_attach(struct taskqgroup *qgr
 	qgroup->tqg_queue[qid].tgc_cnt++;
 	LIST_INSERT_HEAD(&qgroup->tqg_queue[qid].tgc_tasks, gtask, gt_list);
 	gtask->gt_taskqueue = qgroup->tqg_queue[qid].tgc_taskq;
-	gtask->gt_cpu = qgroup->tqg_queue[qid].tgc_cpu;
 	if (irq != -1 && smp_started) {
+		gtask->gt_cpu = qgroup->tqg_queue[qid].tgc_cpu;
 		CPU_ZERO(&mask);
 		CPU_SET(qgroup->tqg_queue[qid].tgc_cpu, &mask);
 		mtx_unlock(&qgroup->tqg_lock);
@@ -643,6 +643,32 @@ taskqgroup_attach(struct taskqgroup *qgr
 	mtx_unlock(&qgroup->tqg_lock);
 }
 
+static void
+taskqgroup_attach_deferred(struct taskqgroup *qgroup, struct grouptask *gtask)
+{
+	cpuset_t mask;
+	int qid, cpu;
+
+	mtx_lock(&qgroup->tqg_lock);
+	qid = taskqgroup_find(qgroup, gtask->gt_uniq);
+	cpu = qgroup->tqg_queue[qid].tgc_cpu;
+	if (gtask->gt_irq != -1) {
+		mtx_unlock(&qgroup->tqg_lock);
+
+		CPU_ZERO(&mask);
+		CPU_SET(cpu, &mask);
+		intr_setaffinity(gtask->gt_irq, &mask);
+
+		mtx_lock(&qgroup->tqg_lock);
+	}
+	qgroup->tqg_queue[qid].tgc_cnt++;
+
+	LIST_INSERT_HEAD(&qgroup->tqg_queue[qid].tgc_tasks, gtask,
+	    gt_list);
+	gtask->gt_taskqueue = qgroup->tqg_queue[qid].tgc_taskq;
+	mtx_unlock(&qgroup->tqg_lock);
+}
+
 int
 taskqgroup_attach_cpu(struct taskqgroup *qgroup, struct grouptask *gtask,
 	void *uniq, int cpu, int irq, char *name)
@@ -671,13 +697,46 @@ taskqgroup_attach_cpu(struct taskqgroup 
 	qgroup->tqg_queue[qid].tgc_cnt++;
 	LIST_INSERT_HEAD(&qgroup->tqg_queue[qid].tgc_tasks, gtask, gt_list);
 	gtask->gt_taskqueue = qgroup->tqg_queue[qid].tgc_taskq;
-	if (irq != -1 && smp_started) {
-		CPU_ZERO(&mask);
-		CPU_SET(qgroup->tqg_queue[qid].tgc_cpu, &mask);
-		mtx_unlock(&qgroup->tqg_lock);
+	cpu = qgroup->tqg_queue[qid].tgc_cpu;
+	mtx_unlock(&qgroup->tqg_lock);
+
+	CPU_ZERO(&mask);
+	CPU_SET(cpu, &mask);
+	if (irq != -1 && smp_started)
 		intr_setaffinity(irq, &mask);
-	} else
+	return (0);
+}
+
+static int
+taskqgroup_attach_cpu_deferred(struct taskqgroup *qgroup, struct grouptask *gtask)
+{
+	cpuset_t mask;
+	int i, qid, irq, cpu;
+
+	qid = -1;
+	irq = gtask->gt_irq;
+	cpu = gtask->gt_cpu;
+	MPASS(smp_started);
+	mtx_lock(&qgroup->tqg_lock);
+	for (i = 0; i < qgroup->tqg_cnt; i++)
+		if (qgroup->tqg_queue[i].tgc_cpu == cpu) {
+			qid = i;
+			break;
+		}
+	if (qid == -1) {
 		mtx_unlock(&qgroup->tqg_lock);
+		return (EINVAL);
+	}
+	qgroup->tqg_queue[qid].tgc_cnt++;
+	LIST_INSERT_HEAD(&qgroup->tqg_queue[qid].tgc_tasks, gtask, gt_list);
+	gtask->gt_taskqueue = qgroup->tqg_queue[qid].tgc_taskq;
+	mtx_unlock(&qgroup->tqg_lock);
+
+	CPU_ZERO(&mask);
+	CPU_SET(cpu, &mask);
+
+	if (irq != -1)
+		intr_setaffinity(irq, &mask);
 	return (0);
 }
 
@@ -741,9 +800,8 @@
 static int
 _taskqgroup_adjust(struct taskqgroup *qgroup, int cnt, int stride)
 {
 	LIST_HEAD(, grouptask) gtask_head = LIST_HEAD_INITIALIZER(NULL);
-	cpuset_t mask;
 	struct grouptask *gtask;
-	int i, k, old_cnt, qid, cpu;
+	int i, k, old_cnt, old_cpu, cpu;
 
 	mtx_assert(&qgroup->tqg_lock, MA_OWNED);
@@ -758,6 +816,9 @@ _taskqgroup_adjust(struct taskqgroup *qg
 	}
 	qgroup->tqg_adjusting = 1;
 	old_cnt = qgroup->tqg_cnt;
+	old_cpu = 0;
+	if (old_cnt < cnt)
+		old_cpu = qgroup->tqg_queue[old_cnt].tgc_cpu;
 	mtx_unlock(&qgroup->tqg_lock);
 	/*
 	 * Set up queue for tasks added before boot.
 	 */
@@ -771,8 +832,13 @@ _taskqgroup_adjust(struct taskqgroup *qg
 	/*
 	 * If new taskq threads have been added.
 	 */
-	for (i = old_cnt; i < cnt; i++)
-		taskqgroup_cpu_create(qgroup, i);
+	cpu = old_cpu;
+	for (i = old_cnt; i < cnt; i++) {
+		for (k = 0; k < qgroup->tqg_stride; k++)
+			cpu = CPU_NEXT(cpu);
+
+		taskqgroup_cpu_create(qgroup, i, cpu);
+	}
 	mtx_lock(&qgroup->tqg_lock);
 	qgroup->tqg_cnt = cnt;
 	qgroup->tqg_stride = stride;
@@ -788,39 +854,15 @@ _taskqgroup_adjust(struct taskqgroup *qg
 		}
 	}
 
+	mtx_unlock(&qgroup->tqg_lock);
+
 	while ((gtask = LIST_FIRST(&gtask_head))) {
 		LIST_REMOVE(gtask, gt_list);
 		if (gtask->gt_cpu == -1)
-			qid = taskqgroup_find(qgroup, gtask->gt_uniq);
-		else {
-			for (i = 0; i < qgroup->tqg_cnt; i++)
-				if (qgroup->tqg_queue[i].tgc_cpu == gtask->gt_cpu) {
-					qid = i;
-					break;
-				}
-		}
-		qgroup->tqg_queue[qid].tgc_cnt++;
-		LIST_INSERT_HEAD(&qgroup->tqg_queue[qid].tgc_tasks, gtask,
-		    gt_list);
-		gtask->gt_taskqueue = qgroup->tqg_queue[qid].tgc_taskq;
-	}
-	/*
-	 * Set new CPU and IRQ affinity
-	 */
-	cpu = CPU_FIRST();
-	for (i = 0; i < cnt; i++) {
-		qgroup->tqg_queue[i].tgc_cpu = cpu;
-		for (k = 0; k < qgroup->tqg_stride; k++)
-			cpu = CPU_NEXT(cpu);
-		CPU_ZERO(&mask);
-		CPU_SET(qgroup->tqg_queue[i].tgc_cpu, &mask);
-		LIST_FOREACH(gtask, &qgroup->tqg_queue[i].tgc_tasks, gt_list) {
-			if (gtask->gt_irq == -1)
-				continue;
-			intr_setaffinity(gtask->gt_irq, &mask);
-		}
+			taskqgroup_attach_deferred(qgroup, gtask);
+		else if (taskqgroup_attach_cpu_deferred(qgroup, gtask))
+			taskqgroup_attach_deferred(qgroup, gtask);
 	}
-	mtx_unlock(&qgroup->tqg_lock);
 
 	/*
 	 * If taskq thread count has been reduced.
 	 */
@@ -837,12 +879,12 @@ _taskqgroup_adjust(struct taskqgroup *qg
 }
 
 int
-taskqgroup_adjust(struct taskqgroup *qgroup, int cpu, int stride)
+taskqgroup_adjust(struct taskqgroup *qgroup, int cnt, int stride)
 {
 	int error;
 
 	mtx_lock(&qgroup->tqg_lock);
-	error = _taskqgroup_adjust(qgroup, cpu, stride);
+	error = _taskqgroup_adjust(qgroup, cnt, stride);
 	mtx_unlock(&qgroup->tqg_lock);
 
 	return (error);
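
For context, the pre-SMP scenario the new deferred-attach paths handle looks like
this from a driver's point of view. The fragment below is a minimal sketch, not
code from this commit: GROUPTASK_INIT() and taskqgroup_attach_cpu() are the real
gtaskqueue entry points (the latter's signature appears in the diff above), but
the mydev_softc layout, rxq_intr_task(), the if_io_tqg group name, the choice of
CPU 2, and sc->irq_num are all hypothetical.

  #include <sys/param.h>
  #include <sys/gtaskqueue.h>

  TASKQGROUP_DECLARE(if_io_tqg);	/* group assumed defined elsewhere */

  struct mydev_softc {			/* hypothetical driver softc */
  	struct grouptask	rxq_task;
  	int			irq_num;	/* hypothetical IRQ number */
  };

  /* Runs on the taskqueue thread serving the requested CPU. */
  static void
  rxq_intr_task(void *context)
  {
  	struct mydev_softc *sc = context;

  	(void)sc;			/* process the receive queue here */
  }

  /*
   * Called from device attach, which for a compiled-in driver can run
   * before smp_started is set, i.e. while only the boot CPU is up.
   */
  static void
  mydev_attach_rxq(struct mydev_softc *sc)
  {
  	GROUPTASK_INIT(&sc->rxq_task, 0, rxq_intr_task, sc);

  	/*
  	 * Request CPU 2.  Before r307566, a task attached this early was
  	 * left on whatever queue existed at the time, and tgc_cpu was
  	 * later recomputed as idx * stride, so the task and its IRQ could
  	 * end up bound to a CPU other than the one requested.
  	 */
  	taskqgroup_attach_cpu(qgroup_if_io_tqg, &sc->rxq_task, sc,
  	    2 /* cpu */, sc->irq_num, "mydev rxq");
  }

With this change, once _taskqgroup_adjust() runs after SMP start it re-attaches
each queued task: a task with gt_cpu set goes through
taskqgroup_attach_cpu_deferred(), which finds the queue whose tgc_cpu matches and
re-programs the IRQ affinity, falling back to taskqgroup_attach_deferred() only
if no queue serves that CPU.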