From owner-svn-src-all@FreeBSD.ORG  Thu Apr  2 01:02:50 2015
Message-Id: <201504020102.t3212lTO021499@svn.freebsd.org>
From: John Baldwin <jhb@FreeBSD.org>
Date: Thu, 2 Apr 2015 01:02:47 +0000 (UTC)
To: src-committers@freebsd.org, svn-src-all@freebsd.org,
        svn-src-stable@freebsd.org, svn-src-stable-9@freebsd.org
Subject: svn commit: r280973 - in stable: 10/sys/amd64/amd64 10/sys/dev/acpica
        10/sys/i386/i386 10/sys/kern 10/sys/sys 10/sys/x86/x86 9/sys/amd64/amd64
        9/sys/dev/acpica 9/sys/i386/i386 9/sys/kern 9/sys/sys ...
X-SVN-Group: stable-9

Author: jhb
Date: Thu Apr  2 01:02:42 2015
New Revision: 280973
URL: https://svnweb.freebsd.org/changeset/base/280973

Log:
  MFC 276724:
  On some Intel CPUs with a P-state but not C-state invariant TSC the
  TSC may also halt in C2 and not just C3 (it seems that in some cases
  the BIOS advertises its C3 state as a C2 state in _CST).  Just play it
  safe and disable both C2 and C3 states if a user forces the use of the
  TSC as the timecounter on such CPUs.

  PR:		192316

Modified:
  stable/9/sys/amd64/amd64/machdep.c
  stable/9/sys/dev/acpica/acpi_cpu.c
  stable/9/sys/i386/i386/machdep.c
  stable/9/sys/kern/kern_clocksource.c
  stable/9/sys/kern/kern_tc.c
  stable/9/sys/sys/systm.h
  stable/9/sys/sys/timetc.h
  stable/9/sys/x86/x86/tsc.c
Directory Properties:
  stable/9/sys/   (props changed)
  stable/9/sys/dev/   (props changed)
  stable/9/sys/sys/   (props changed)

Changes in other areas also in this revision:
Modified:
  stable/10/sys/amd64/amd64/machdep.c
  stable/10/sys/dev/acpica/acpi_cpu.c
  stable/10/sys/i386/i386/machdep.c
  stable/10/sys/kern/kern_clocksource.c
  stable/10/sys/kern/kern_tc.c
  stable/10/sys/sys/systm.h
  stable/10/sys/sys/timetc.h
  stable/10/sys/x86/x86/tsc.c
Directory Properties:
  stable/10/   (props changed)

Modified: stable/9/sys/amd64/amd64/machdep.c
==============================================================================
--- stable/9/sys/amd64/amd64/machdep.c	Thu Apr  2 00:30:53 2015	(r280972)
+++ stable/9/sys/amd64/amd64/machdep.c	Thu Apr  2 01:02:42 2015	(r280973)
@@ -797,7 +797,7 @@ cpu_idle(int busy)
 	}
 
 	/* Apply AMD APIC timer C1E workaround. */
-	if (cpu_ident_amdc1e && cpu_disable_deep_sleep) {
+	if (cpu_ident_amdc1e && cpu_disable_c3_sleep) {
 		msr = rdmsr(MSR_AMDK8_IPM);
 		if (msr & AMDK8_CMPHALT)
 			wrmsr(MSR_AMDK8_IPM, msr & ~AMDK8_CMPHALT);

Modified: stable/9/sys/dev/acpica/acpi_cpu.c
==============================================================================
--- stable/9/sys/dev/acpica/acpi_cpu.c	Thu Apr  2 00:30:53 2015	(r280972)
+++ stable/9/sys/dev/acpica/acpi_cpu.c	Thu Apr  2 01:02:42 2015	(r280973)
@@ -85,6 +85,7 @@ struct acpi_cpu_softc {
     int			 cpu_prev_sleep;/* Last idle sleep duration. */
     int			 cpu_features;	/* Child driver supported features. */
     /* Runtime state. */
+    int			 cpu_non_c2;	/* Index of lowest non-C2 state. */
     int			 cpu_non_c3;	/* Index of lowest non-C3 state. */
     u_int		 cpu_cx_stats[MAX_CX_STATES];/* Cx usage history. */
     /* Values for sysctl. */
@@ -664,8 +665,10 @@ acpi_cpu_generic_cx_probe(struct acpi_cp
     cx_ptr->type = ACPI_STATE_C1;
     cx_ptr->trans_lat = 0;
     cx_ptr++;
+    sc->cpu_non_c2 = sc->cpu_cx_count;
     sc->cpu_non_c3 = sc->cpu_cx_count;
     sc->cpu_cx_count++;
+    cpu_deepest_sleep = 1;
 
     /*
      * The spec says P_BLK must be 6 bytes long.  However, some systems
@@ -691,6 +694,7 @@ acpi_cpu_generic_cx_probe(struct acpi_cp
 	    cx_ptr++;
 	    sc->cpu_non_c3 = sc->cpu_cx_count;
 	    sc->cpu_cx_count++;
+	    cpu_deepest_sleep = 2;
 	}
     }
     if (sc->cpu_p_blk_len < 6)
@@ -707,7 +711,7 @@ acpi_cpu_generic_cx_probe(struct acpi_cp
 	    cx_ptr->trans_lat = AcpiGbl_FADT.C3Latency;
 	    cx_ptr++;
 	    sc->cpu_cx_count++;
-	    cpu_can_deep_sleep = 1;
+	    cpu_deepest_sleep = 3;
 	}
     }
 }
@@ -753,6 +757,7 @@ acpi_cpu_cx_cst(struct acpi_cpu_softc *s
 	count = MAX_CX_STATES;
     }
 
+    sc->cpu_non_c2 = 0;
     sc->cpu_non_c3 = 0;
     sc->cpu_cx_count = 0;
     cx_ptr = sc->cpu_cx_states;
@@ -764,6 +769,7 @@ acpi_cpu_cx_cst(struct acpi_cpu_softc *s
     cx_ptr->type = ACPI_STATE_C0;
     cx_ptr++;
     sc->cpu_cx_count++;
+    cpu_deepest_sleep = 1;
 
     /* Set up all valid states. */
     for (i = 0; i < count; i++) {
@@ -784,6 +790,7 @@ acpi_cpu_cx_cst(struct acpi_cpu_softc *s
 		/* This is the first C1 state.  Use the reserved slot. */
 		sc->cpu_cx_states[0] = *cx_ptr;
 	    } else {
+		sc->cpu_non_c2 = sc->cpu_cx_count;
 		sc->cpu_non_c3 = sc->cpu_cx_count;
 		cx_ptr++;
 		sc->cpu_cx_count++;
@@ -791,6 +798,8 @@ acpi_cpu_cx_cst(struct acpi_cpu_softc *s
 	    continue;
 	case ACPI_STATE_C2:
 	    sc->cpu_non_c3 = sc->cpu_cx_count;
+	    if (cpu_deepest_sleep < 2)
+		cpu_deepest_sleep = 2;
 	    break;
 	case ACPI_STATE_C3:
 	default:
@@ -800,7 +809,7 @@ acpi_cpu_cx_cst(struct acpi_cpu_softc *s
 			 device_get_unit(sc->cpu_dev), i));
 		continue;
 	    } else
-		cpu_can_deep_sleep = 1;
+		cpu_deepest_sleep = 3;
 	    break;
 	}
 
@@ -981,7 +990,9 @@ acpi_cpu_idle()
 
     /* Find the lowest state that has small enough latency. */
     cx_next_idx = 0;
-    if (cpu_disable_deep_sleep)
+    if (cpu_disable_c2_sleep)
+	i = min(sc->cpu_cx_lowest, sc->cpu_non_c2);
+    else if (cpu_disable_c3_sleep)
 	i = min(sc->cpu_cx_lowest, sc->cpu_non_c3);
     else
 	i = sc->cpu_cx_lowest;

Modified: stable/9/sys/i386/i386/machdep.c
==============================================================================
--- stable/9/sys/i386/i386/machdep.c	Thu Apr  2 00:30:53 2015	(r280972)
+++ stable/9/sys/i386/i386/machdep.c	Thu Apr  2 01:02:42 2015	(r280973)
@@ -1373,7 +1373,7 @@ cpu_idle(int busy)
 
 #ifndef XEN
 	/* Apply AMD APIC timer C1E workaround. */
-	if (cpu_ident_amdc1e && cpu_disable_deep_sleep) {
+	if (cpu_ident_amdc1e && cpu_disable_c3_sleep) {
 		msr = rdmsr(MSR_AMDK8_IPM);
 		if (msr & AMDK8_CMPHALT)
 			wrmsr(MSR_AMDK8_IPM, msr & ~AMDK8_CMPHALT);

Modified: stable/9/sys/kern/kern_clocksource.c
==============================================================================
--- stable/9/sys/kern/kern_clocksource.c	Thu Apr  2 00:30:53 2015	(r280972)
+++ stable/9/sys/kern/kern_clocksource.c	Thu Apr  2 01:02:42 2015	(r280973)
@@ -59,8 +59,9 @@ __FBSDID("$FreeBSD$");
 cyclic_clock_func_t	cyclic_clock_func = NULL;
 #endif
 
-int			cpu_can_deep_sleep = 0;	/* C3 state is available. */
-int			cpu_disable_deep_sleep = 0; /* Timer dies in C3. */
+int			cpu_deepest_sleep = 0;	/* Deepest Cx state available. */
+int			cpu_disable_c2_sleep = 0; /* Timer dies in C2. */
+int			cpu_disable_c3_sleep = 0; /* Timer dies in C3. */
 
 static void		setuptimer(void);
 static void		loadtimer(struct bintime *now, int first);
@@ -655,7 +656,7 @@ cpu_initclocks_bsp(void)
 	else if (!periodic && (timer->et_flags & ET_FLAGS_ONESHOT) == 0)
 		periodic = 1;
 	if (timer->et_flags & ET_FLAGS_C3STOP)
-		cpu_disable_deep_sleep++;
+		cpu_disable_c3_sleep++;
 
 	/*
 	 * We honor the requested 'hz' value.
@@ -939,9 +940,9 @@ sysctl_kern_eventtimer_timer(SYSCTL_HAND
 	configtimer(0);
 	et_free(timer);
 	if (et->et_flags & ET_FLAGS_C3STOP)
-		cpu_disable_deep_sleep++;
+		cpu_disable_c3_sleep++;
 	if (timer->et_flags & ET_FLAGS_C3STOP)
-		cpu_disable_deep_sleep--;
+		cpu_disable_c3_sleep--;
 	periodic = want_periodic;
 	timer = et;
 	et_init(timer, timercb, NULL, NULL);

Modified: stable/9/sys/kern/kern_tc.c
==============================================================================
--- stable/9/sys/kern/kern_tc.c	Thu Apr  2 00:30:53 2015	(r280972)
+++ stable/9/sys/kern/kern_tc.c	Thu Apr  2 01:02:42 2015	(r280973)
@@ -515,10 +515,10 @@ tc_windup(void)
 	/* Now is a good time to change timecounters. */
 	if (th->th_counter != timecounter) {
 #ifndef __arm__
-		if ((timecounter->tc_flags & TC_FLAGS_C3STOP) != 0)
-			cpu_disable_deep_sleep++;
-		if ((th->th_counter->tc_flags & TC_FLAGS_C3STOP) != 0)
-			cpu_disable_deep_sleep--;
+		if ((timecounter->tc_flags & TC_FLAGS_C2STOP) != 0)
+			cpu_disable_c2_sleep++;
+		if ((th->th_counter->tc_flags & TC_FLAGS_C2STOP) != 0)
+			cpu_disable_c2_sleep--;
 #endif
 		th->th_counter = timecounter;
 		th->th_offset_count = ncount;

Modified: stable/9/sys/sys/systm.h
==============================================================================
--- stable/9/sys/sys/systm.h	Thu Apr  2 00:30:53 2015	(r280972)
+++ stable/9/sys/sys/systm.h	Thu Apr  2 01:02:42 2015	(r280973)
@@ -268,8 +268,9 @@ void	cpu_startprofclock(void);
 void	cpu_stopprofclock(void);
 void	cpu_idleclock(void);
 void	cpu_activeclock(void);
-extern int	cpu_can_deep_sleep;
-extern int	cpu_disable_deep_sleep;
+extern int	cpu_deepest_sleep;
+extern int	cpu_disable_c2_sleep;
+extern int	cpu_disable_c3_sleep;
 
 int	cr_cansee(struct ucred *u1, struct ucred *u2);
 int	cr_canseesocket(struct ucred *cred, struct socket *so);

Modified: stable/9/sys/sys/timetc.h
==============================================================================
--- stable/9/sys/sys/timetc.h	Thu Apr  2 00:30:53 2015	(r280972)
+++ stable/9/sys/sys/timetc.h	Thu Apr  2 01:02:42 2015	(r280973)
@@ -58,7 +58,7 @@ struct timecounter {
 		 * means "only use at explicit request".
 		 */
 	u_int			tc_flags;
-#define	TC_FLAGS_C3STOP		1	/* Timer dies in C3. */
+#define	TC_FLAGS_C2STOP		1	/* Timer dies in C2+. */
 
 	void			*tc_priv;
 		/* Pointer to the timecounter's private parts. */

Modified: stable/9/sys/x86/x86/tsc.c
==============================================================================
--- stable/9/sys/x86/x86/tsc.c	Thu Apr  2 00:30:53 2015	(r280972)
+++ stable/9/sys/x86/x86/tsc.c	Thu Apr  2 01:02:42 2015	(r280973)
@@ -554,16 +554,16 @@ init_TSC_tc(void)
 	}
 
 	/*
-	 * We cannot use the TSC if it stops incrementing in deep sleep.
-	 * Currently only Intel CPUs are known for this problem unless
-	 * the invariant TSC bit is set.
+	 * We cannot use the TSC if it stops incrementing while idle.
+	 * Intel CPUs without a C-state invariant TSC can stop the TSC
+	 * in either C2 or C3.
 	 */
-	if (cpu_can_deep_sleep && cpu_vendor_id == CPU_VENDOR_INTEL &&
+	if (cpu_deepest_sleep >= 2 && cpu_vendor_id == CPU_VENDOR_INTEL &&
 	    (amd_pminfo & AMDPM_TSC_INVARIANT) == 0) {
 		tsc_timecounter.tc_quality = -1000;
-		tsc_timecounter.tc_flags |= TC_FLAGS_C3STOP;
+		tsc_timecounter.tc_flags |= TC_FLAGS_C2STOP;
 		if (bootverbose)
-			printf("TSC timecounter disabled: C3 enabled.\n");
+			printf("TSC timecounter disabled: C2/C3 may halt it.\n");
 		goto init;
 	}
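
For readers who want to check whether their machine is affected: the
(amd_pminfo & AMDPM_TSC_INVARIANT) test in init_TSC_tc() corresponds to the
invariant-TSC bit, CPUID leaf 0x80000007, EDX bit 8.  The sketch below is not
part of this commit; it is a minimal userland probe that assumes a GCC- or
Clang-style <cpuid.h> is available:

/*
 * Illustrative only, not part of r280973.  Reads CPUID leaf 0x80000007
 * (Advanced Power Management Information) and reports the invariant-TSC
 * bit (EDX bit 8), the same bit the kernel tests via AMDPM_TSC_INVARIANT.
 * Assumes an x86 compiler that provides <cpuid.h> (GCC or Clang).
 */
#include <cpuid.h>
#include <stdio.h>

int
main(void)
{
	unsigned int eax, ebx, ecx, edx;

	/* __get_cpuid() returns 0 if the requested leaf is unsupported. */
	if (__get_cpuid(0x80000007, &eax, &ebx, &ecx, &edx) == 0) {
		printf("CPUID leaf 0x80000007 not supported\n");
		return (1);
	}
	printf("Invariant TSC: %s\n", (edx & (1u << 8)) ? "yes" : "no");
	return (0);
}

On a CPU where that bit is clear, forcing the TSC with
"sysctl kern.timecounter.hardware=TSC" should now keep idle at C1 or shallower;
the dev.cpu.N.cx_usage histograms can be watched to confirm that C2/C3 are no
longer entered.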