Date:      Sun, 1 Aug 2010 18:48:43 GMT
From:      John Baldwin <jhb@FreeBSD.org>
To:        Perforce Change Reviews <perforce@freebsd.org>
Subject:   PERFORCE change 181691 for review
Message-ID:  <201008011848.o71ImhN4034774@repoman.freebsd.org>

http://p4web.freebsd.org/@@181691?ac=10

Change 181691 by jhb@jhb_fiver on 2010/08/01 18:48:37

	- Implement ipi_cpu() for all architectures.
	- Use ipi_cpu() instead of ipi_selected() when sending an IPI to a
	  single CPU.
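
The call-site change is mechanical: wherever a caller previously built a
one-bit cpumask_t (or passed pc_cpumask) just to target a single CPU, it now
passes the CPU id directly.  A minimal before/after sketch, using only the
calls and constants that appear in the diffs below:

	/* Before: construct a single-bit mask for ipi_selected(). */
	ipi_selected(1 << cpu, IPI_PREEMPT);

	/* After: pass the CPU id; the same IPI vector is delivered. */
	ipi_cpu(cpu, IPI_PREEMPT);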

Affected files ...

.. //depot/projects/smpng/sys/amd64/amd64/mp_machdep.c#64 edit
.. //depot/projects/smpng/sys/amd64/include/smp.h#24 edit
.. //depot/projects/smpng/sys/i386/i386/mp_machdep.c#126 edit
.. //depot/projects/smpng/sys/i386/include/smp.h#32 edit
.. //depot/projects/smpng/sys/i386/xen/mp_machdep.c#16 edit
.. //depot/projects/smpng/sys/ia64/ia64/mp_machdep.c#49 edit
.. //depot/projects/smpng/sys/ia64/include/smp.h#12 edit
.. //depot/projects/smpng/sys/kern/sched_4bsd.c#95 edit
.. //depot/projects/smpng/sys/kern/sched_ule.c#104 edit
.. //depot/projects/smpng/sys/kern/subr_smp.c#61 edit
.. //depot/projects/smpng/sys/mips/include/smp.h#7 edit
.. //depot/projects/smpng/sys/mips/mips/mp_machdep.c#10 edit
.. //depot/projects/smpng/sys/notes#98 edit
.. //depot/projects/smpng/sys/powerpc/include/smp.h#9 edit
.. //depot/projects/smpng/sys/powerpc/powerpc/mp_machdep.c#19 edit
.. //depot/projects/smpng/sys/sparc64/include/smp.h#25 edit
.. //depot/projects/smpng/sys/sun4v/include/smp.h#8 edit
.. //depot/projects/smpng/sys/sun4v/sun4v/mp_machdep.c#9 edit

Differences ...

==== //depot/projects/smpng/sys/amd64/amd64/mp_machdep.c#64 (text+ko) ====

@@ -1239,15 +1239,51 @@
 			do {
 				old_pending = cpu_ipi_pending[cpu];
 				new_pending = old_pending | bitmap;
-			} while  (!atomic_cmpset_int(&cpu_ipi_pending[cpu],old_pending, new_pending));	
-
+			} while  (!atomic_cmpset_int(&cpu_ipi_pending[cpu],
+			    old_pending, new_pending));	
 			if (old_pending)
 				continue;
 		}
+		lapic_ipi_vectored(ipi, cpu_apic_ids[cpu]);
+	}
+}
 
-		lapic_ipi_vectored(ipi, cpu_apic_ids[cpu]);
+/*
+ * send an IPI to a specific CPU.
+ */
+void
+ipi_cpu(int cpu, u_int ipi)
+{
+	u_int bitmap = 0;
+	u_int old_pending;
+	u_int new_pending;
+
+	if (IPI_IS_BITMAPED(ipi)) { 
+		bitmap = 1 << ipi;
+		ipi = IPI_BITMAP_VECTOR;
 	}
 
+	/*
+	 * IPI_STOP_HARD maps to a NMI and the trap handler needs a bit
+	 * of help in order to understand what is the source.
+	 * Set the mask of receiving CPUs for this purpose.
+	 */
+	if (ipi == IPI_STOP_HARD)
+		atomic_set_int(&ipi_nmi_pending, 1 << cpu);
+
+	CTR3(KTR_SMP, "%s: cpu: %d ipi: %x", __func__, cpu, ipi);
+	KASSERT(cpu_apic_ids[cpu] != -1, ("IPI to non-existent CPU %d", cpu));
+
+	if (bitmap) {
+		do {
+			old_pending = cpu_ipi_pending[cpu];
+			new_pending = old_pending | bitmap;
+		} while  (!atomic_cmpset_int(&cpu_ipi_pending[cpu],
+		    old_pending, new_pending));	
+		if (old_pending)
+			return;
+	}
+	lapic_ipi_vectored(ipi, cpu_apic_ids[cpu]);
 }
 
 /*
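
A note on the bitmapped-IPI path in ipi_cpu() above: the do/while
atomic_cmpset_int() loop merges the new bit into cpu_ipi_pending[cpu], and
only the sender that finds the word previously empty raises
IPI_BITMAP_VECTOR; a later sender sees old_pending != 0 and returns, because
a vector is already in flight and its handler processes all pending bits at
once.  A standalone sketch of that coalescing pattern, written with C11
atomics rather than the kernel's atomic(9) and local APIC routines (pending,
post_bitmapped_ipi and send_vector are made-up names, for illustration only):

	#include <stdatomic.h>
	#include <stdio.h>

	static _Atomic unsigned pending;   /* stands in for cpu_ipi_pending[cpu] */

	/* Stand-in for lapic_ipi_vectored(IPI_BITMAP_VECTOR, apic_id). */
	static void
	send_vector(void)
	{
		printf("vector sent\n");
	}

	/* Merge one IPI bit; only the first sender delivers a vector. */
	static void
	post_bitmapped_ipi(unsigned bitmap)
	{
		unsigned old_pending, new_pending;

		do {
			old_pending = atomic_load(&pending);
			new_pending = old_pending | bitmap;
		} while (!atomic_compare_exchange_weak(&pending,
		    &old_pending, new_pending));
		if (old_pending)
			return;		/* a vector is already on its way */
		send_vector();
	}

	int
	main(void)
	{
		post_bitmapped_ipi(1 << 0);	/* delivers the vector */
		post_bitmapped_ipi(1 << 1);	/* coalesced; no second delivery */
		return (0);
	}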

==== //depot/projects/smpng/sys/amd64/include/smp.h#24 (text+ko) ====

@@ -60,10 +60,11 @@
 void	cpustop_handler(void);
 void	cpususpend_handler(void);
 void	init_secondary(void);
+void	ipi_all_but_self(u_int ipi);
+void 	ipi_bitmap_handler(struct trapframe frame);
+void	ipi_cpu(int cpu, u_int ipi);
 int	ipi_nmi_handler(void);
 void	ipi_selected(cpumask_t cpus, u_int ipi);
-void	ipi_all_but_self(u_int ipi);
-void 	ipi_bitmap_handler(struct trapframe frame);
 u_int	mp_bootaddress(u_int);
 int	mp_grab_cpu_hlt(void);
 void	smp_cache_flush(void);

==== //depot/projects/smpng/sys/i386/i386/mp_machdep.c#126 (text+ko) ====

@@ -1327,15 +1327,51 @@
 			do {
 				old_pending = cpu_ipi_pending[cpu];
 				new_pending = old_pending | bitmap;
-			} while  (!atomic_cmpset_int(&cpu_ipi_pending[cpu],old_pending, new_pending));	
-
+			} while  (!atomic_cmpset_int(&cpu_ipi_pending[cpu],
+			    old_pending, new_pending));	
 			if (old_pending)
 				continue;
 		}
+		lapic_ipi_vectored(ipi, cpu_apic_ids[cpu]);
+	}
+}
 
-		lapic_ipi_vectored(ipi, cpu_apic_ids[cpu]);
+/*
+ * send an IPI to a specific CPU.
+ */
+void
+ipi_cpu(int cpu, u_int ipi)
+{
+	u_int bitmap = 0;
+	u_int old_pending;
+	u_int new_pending;
+
+	if (IPI_IS_BITMAPED(ipi)) { 
+		bitmap = 1 << ipi;
+		ipi = IPI_BITMAP_VECTOR;
 	}
 
+	/*
+	 * IPI_STOP_HARD maps to a NMI and the trap handler needs a bit
+	 * of help in order to understand what is the source.
+	 * Set the mask of receiving CPUs for this purpose.
+	 */
+	if (ipi == IPI_STOP_HARD)
+		atomic_set_int(&ipi_nmi_pending, 1 << cpu);
+
+	CTR3(KTR_SMP, "%s: cpu: %d ipi: %x", __func__, cpu, ipi);
+	KASSERT(cpu_apic_ids[cpu] != -1, ("IPI to non-existent CPU %d", cpu));
+
+	if (bitmap) {
+		do {
+			old_pending = cpu_ipi_pending[cpu];
+			new_pending = old_pending | bitmap;
+		} while  (!atomic_cmpset_int(&cpu_ipi_pending[cpu],
+		    old_pending, new_pending));	
+		if (old_pending)
+			return;
+	}
+	lapic_ipi_vectored(ipi, cpu_apic_ids[cpu]);
 }
 
 /*

==== //depot/projects/smpng/sys/i386/include/smp.h#32 (text+ko) ====

@@ -60,12 +60,13 @@
 void	cpu_add(u_int apic_id, char boot_cpu);
 void	cpustop_handler(void);
 void	init_secondary(void);
-int	ipi_nmi_handler(void);
-void	ipi_selected(cpumask_t cpus, u_int ipi);
 void	ipi_all_but_self(u_int ipi);
 #ifndef XEN
 void 	ipi_bitmap_handler(struct trapframe frame);
 #endif
+void	ipi_cpu(int cpu, u_int ipi);
+int	ipi_nmi_handler(void);
+void	ipi_selected(cpumask_t cpus, u_int ipi);
 u_int	mp_bootaddress(u_int);
 int	mp_grab_cpu_hlt(void);
 void	smp_cache_flush(void);

==== //depot/projects/smpng/sys/i386/xen/mp_machdep.c#16 (text+ko) ====

@@ -1121,19 +1121,14 @@
 		cpu--;
 		cpus &= ~(1 << cpu);
 
-		KASSERT(cpu_apic_ids[cpu] != -1,
-		    ("IPI to non-existent CPU %d", cpu));
-
 		if (bitmap) {
 			do {
 				old_pending = cpu_ipi_pending[cpu];
 				new_pending = old_pending | bitmap;
-			} while  (!atomic_cmpset_int(&cpu_ipi_pending[cpu],old_pending, new_pending));	
-
+			} while  (!atomic_cmpset_int(&cpu_ipi_pending[cpu],
+			    old_pending, new_pending));	
 			if (!old_pending)
 				ipi_pcpu(cpu, RESCHEDULE_VECTOR);
-			continue;
-			
 		} else {
 			KASSERT(call_data != NULL, ("call_data not set"));
 			ipi_pcpu(cpu, CALL_FUNCTION_VECTOR);
@@ -1142,6 +1137,45 @@
 }
 
 /*
+ * send an IPI to a specific CPU.
+ */
+void
+ipi_cpu(int cpu, u_int ipi)
+{
+	u_int bitmap = 0;
+	u_int old_pending;
+	u_int new_pending;
+	
+	if (IPI_IS_BITMAPED(ipi)) { 
+		bitmap = 1 << ipi;
+		ipi = IPI_BITMAP_VECTOR;
+	} 
+
+	/*
+	 * IPI_STOP_HARD maps to a NMI and the trap handler needs a bit
+	 * of help in order to understand what is the source.
+	 * Set the mask of receiving CPUs for this purpose.
+	 */
+	if (ipi == IPI_STOP_HARD)
+		atomic_set_int(&ipi_nmi_pending, 1 << cpu);
+
+	CTR3(KTR_SMP, "%s: cpu: %d ipi: %x", __func__, cpu, ipi);
+
+	if (bitmap) {
+		do {
+			old_pending = cpu_ipi_pending[cpu];
+			new_pending = old_pending | bitmap;
+		} while  (!atomic_cmpset_int(&cpu_ipi_pending[cpu],
+		    old_pending, new_pending));	
+		if (!old_pending)
+			ipi_pcpu(cpu, RESCHEDULE_VECTOR);
+	} else {
+		KASSERT(call_data != NULL, ("call_data not set"));
+		ipi_pcpu(cpu, CALL_FUNCTION_VECTOR);
+	}
+}
+
+/*
  * send an IPI to all CPUs EXCEPT myself
  */
 void

==== //depot/projects/smpng/sys/ia64/ia64/mp_machdep.c#49 (text+ko) ====

@@ -408,6 +408,16 @@
 }
 
 /*
+ * send an IPI to a specific CPU.
+ */
+void
+ipi_cpu(int cpu, u_int ipi)
+{
+
+	ipi_send(cpuid_to_pcpu[cpu], ipi);
+}
+
+/*
  * send an IPI to all CPUs EXCEPT myself.
  */
 void

==== //depot/projects/smpng/sys/ia64/include/smp.h#12 (text+ko) ====

@@ -25,6 +25,7 @@
 extern int ia64_ipi_wakeup;
 
 void	ipi_all_but_self(int ipi);
+void	ipi_cpu(int cpu, u_int ipi);
 void	ipi_selected(cpumask_t cpus, int ipi);
 void	ipi_send(struct pcpu *, int ipi);
 

==== //depot/projects/smpng/sys/kern/sched_4bsd.c#95 (text+ko) ====

@@ -1154,7 +1154,7 @@
 	pcpu = pcpu_find(cpuid);
 	if (idle_cpus_mask & pcpu->pc_cpumask) {
 		forward_wakeups_delivered++;
-		ipi_selected(pcpu->pc_cpumask, IPI_AST);
+		ipi_cpu(cpuid, IPI_AST);
 		return;
 	}
 
@@ -1167,13 +1167,13 @@
 	if (pri <= PRI_MAX_ITHD)
 #endif /* ! FULL_PREEMPTION */
 	{
-		ipi_selected(pcpu->pc_cpumask, IPI_PREEMPT);
+		ipi_cpu(cpuid, IPI_PREEMPT);
 		return;
 	}
 #endif /* defined(IPI_PREEMPTION) && defined(PREEMPTION) */
 
 	pcpu->pc_curthread->td_flags |= TDF_NEEDRESCHED;
-	ipi_selected(pcpu->pc_cpumask, IPI_AST);
+	ipi_cpu(cpuid, IPI_AST);
 	return;
 }
 #endif /* SMP */
@@ -1666,7 +1666,7 @@
 
 		td->td_flags |= TDF_NEEDRESCHED;
 		if (td != curthread)
-			ipi_selected(1 << cpu, IPI_AST);
+			ipi_cpu(cpu, IPI_AST);
 		break;
 	default:
 		break;

==== //depot/projects/smpng/sys/kern/sched_ule.c#104 (text+ko) ====

@@ -851,7 +851,7 @@
 		 * IPI the target cpu to force it to reschedule with the new
 		 * workload.
 		 */
-		ipi_selected(1 << TDQ_ID(low), IPI_PREEMPT);
+		ipi_cpu(TDQ_ID(low), IPI_PREEMPT);
 	}
 	tdq_unlock_pair(high, low);
 	return (moved);
@@ -974,7 +974,7 @@
 			return;
 	}
 	tdq->tdq_ipipending = 1;
-	ipi_selected(1 << cpu, IPI_PREEMPT);
+	ipi_cpu(cpu, IPI_PREEMPT);
 }
 
 /*
@@ -2411,7 +2411,7 @@
 	cpu = ts->ts_cpu;
 	ts->ts_cpu = sched_pickcpu(td, 0);
 	if (cpu != PCPU_GET(cpuid))
-		ipi_selected(1 << cpu, IPI_PREEMPT);
+		ipi_cpu(cpu, IPI_PREEMPT);
 #endif
 }
 

==== //depot/projects/smpng/sys/kern/subr_smp.c#61 (text+ko) ====

@@ -181,7 +181,7 @@
 	id = td->td_oncpu;
 	if (id == NOCPU)
 		return;
-	ipi_selected(1 << id, IPI_AST);
+	ipi_cpu(id, IPI_AST);
 }
 
 /*

==== //depot/projects/smpng/sys/mips/include/smp.h#7 (text+ko) ====

@@ -30,6 +30,7 @@
 
 #ifndef LOCORE
 
+void	ipi_cpu(int cpu, u_int ipi);
 void	ipi_selected(cpumask_t cpus, int ipi);
 void	smp_init_secondary(u_int32_t cpuid);
 void	mpentry(void);

==== //depot/projects/smpng/sys/mips/mips/mp_machdep.c#10 (text+ko) ====

@@ -85,6 +85,15 @@
 	}
 }
 
+/* Send an IPI to a specific CPU. */
+void
+ipi_cpu(int cpu, u_int ipi)
+{
+
+	CTR3(KTR_SMP, "%s: cpu: %d, ipi: %x\n", __func__, cpu, ipi);
+	ipi_send(cpuid_to_pcpu[cpu], ipi);
+}
+
 /*
  * Handle an IPI sent to this processor.
  */

==== //depot/projects/smpng/sys/notes#98 (text+ko) ====

@@ -60,6 +60,7 @@
   recursion) and panic if we try to sleep with any held to provide a cheaper
   version of the current WITNESS check that doesn't bog the system down quite
   as bad.
+- ipi_cpu()
 
 Active child branches:
 - jhb_intr - misc interrupt stuff

==== //depot/projects/smpng/sys/powerpc/include/smp.h#9 (text+ko) ====

@@ -40,6 +40,7 @@
 #ifndef LOCORE
 
 void	ipi_all_but_self(int ipi);
+void	ipi_cpu(int cpu, u_int ipi);
 void	ipi_selected(cpumask_t cpus, int ipi);
 
 struct cpuref {

==== //depot/projects/smpng/sys/powerpc/powerpc/mp_machdep.c#19 (text+ko) ====

@@ -336,6 +336,14 @@
 	}
 }
 
+/* Send an IPI to a specific CPU. */
+void
+ipi_cpu(int cpu, u_int ipi)
+{
+
+	ipi_send(cpuid_to_pcpu[cpu], ipi);
+}
+
 /* Send an IPI to all CPUs EXCEPT myself. */
 void
 ipi_all_but_self(int ipi)

==== //depot/projects/smpng/sys/sparc64/include/smp.h#25 (text+ko) ====

@@ -133,6 +133,17 @@
 	cpu_ipi_selected(cpus, 0, (u_long)tl_ipi_level, ipi);
 }
 
+static __inline void
+ipi_cpu(int cpu, u_int ipi)
+{
+
+	/*
+	 * XXX: Not ideal, but would require more work to add a cpu_ipi_cpu
+	 * function pointer.
+	 */
+	cpu_ipi_selected(1 << cpu, 0, (u_long)tl_ipi_level, ipi);
+}
+
 #if defined(_MACHINE_PMAP_H_) && defined(_SYS_MUTEX_H_)
 
 static __inline void *

==== //depot/projects/smpng/sys/sun4v/include/smp.h#8 (text+ko) ====

@@ -82,8 +82,9 @@
 void cpu_ipi_stop(struct trapframe *tf);
 void cpu_ipi_preempt(struct trapframe *tf);
 
+void	ipi_all_but_self(u_int ipi);
+void	ipi_cpu(int cpu, u_int ipi);
 void	ipi_selected(u_int cpus, u_int ipi);
-void	ipi_all_but_self(u_int ipi);
 
 vm_offset_t mp_tramp_alloc(void);
 void        mp_set_tsb_desc_ra(vm_paddr_t);

==== //depot/projects/smpng/sys/sun4v/sun4v/mp_machdep.c#9 (text+ko) ====

@@ -518,7 +518,6 @@
 	}
 }
 
-
 void
 ipi_selected(u_int icpus, u_int ipi)
 {
@@ -533,7 +532,6 @@
 	 * 4) handling 4-way threading vs 2-way threading should happen here
 	 *    and not in forward wakeup
 	 */
-	
 	cpulist = PCPU_GET(cpulist);
 	cpus = (icpus & ~PCPU_GET(cpumask));
 	
@@ -545,8 +543,33 @@
 		cpu_count++;
 	}
 
-	cpu_ipi_selected(cpu_count, cpulist, (u_long)tl_ipi_level, ipi, 0, &ackmask);
-	
+	cpu_ipi_selected(cpu_count, cpulist, (u_long)tl_ipi_level, ipi, 0,
+	    &ackmask);
+}
+
+void
+ipi_cpu(int cpu, u_int ipi)
+{
+	int i, cpu_count;
+	uint16_t *cpulist;
+	cpumask_t cpus;
+	uint64_t ackmask;
+
+	/* 
+	 * 
+	 * 3) forward_wakeup appears to abuse ASTs
+	 * 4) handling 4-way threading vs 2-way threading should happen here
+	 *    and not in forward wakeup
+	 */
+	cpulist = PCPU_GET(cpulist);
+	if (PCPU_GET(cpumask) & (1 << cpu))
+		cpu_count = 0;
+	else {
+		cpulist[0] = (uint16_t)cpu;
+		cpu_count = 1;
+	}
+	cpu_ipi_selected(cpu_count, cpulist, (u_long)tl_ipi_level, ipi, 0,
+	    &ackmask);
 }
 
 void


