Date: Sun, 12 Jan 2020 06:07:54 +0000 (UTC)
From: Mateusz Guzik <mjg@FreeBSD.org>
To: src-committers@freebsd.org, svn-src-all@freebsd.org, svn-src-head@freebsd.org
Subject: svn commit: r356655 - in head/sys: arm/samsung/exynos arm/versatile cddl/compat/opensolaris/sys cddl/contrib/opensolaris/uts/common/fs/zfs cddl/contrib/opensolaris/uts/common/os ddb dev/acpica dev/...
Message-ID: <202001120607.00C67sW0050677@repo.freebsd.org>
Author: mjg
Date: Sun Jan 12 06:07:54 2020
New Revision: 356655
URL: https://svnweb.freebsd.org/changeset/base/356655

Log:
  Add KERNEL_PANICKED macro for use in place of direct panicstr tests

Modified:
  head/sys/arm/samsung/exynos/chrome_kb.c
  head/sys/arm/versatile/pl050.c
  head/sys/cddl/compat/opensolaris/sys/mutex.h
  head/sys/cddl/contrib/opensolaris/uts/common/fs/zfs/zfs_ioctl.c
  head/sys/cddl/contrib/opensolaris/uts/common/fs/zfs/zfs_vfsops.c
  head/sys/cddl/contrib/opensolaris/uts/common/os/fm.c
  head/sys/ddb/db_textdump.c
  head/sys/dev/acpica/acpi.c
  head/sys/dev/drm2/drm_fb_helper.c
  head/sys/dev/iscsi/iscsi.c
  head/sys/dev/mrsas/mrsas.c
  head/sys/dev/syscons/syscons.c
  head/sys/dev/vt/vt_core.c
  head/sys/dev/xen/console/xen_console.c
  head/sys/gdb/netgdb.c
  head/sys/geom/journal/g_journal.c
  head/sys/geom/mirror/g_mirror.c
  head/sys/kern/kern_ktr.c
  head/sys/kern/kern_lock.c
  head/sys/kern/kern_mutex.c
  head/sys/kern/kern_switch.c
  head/sys/kern/kern_synch.c
  head/sys/kern/sched_4bsd.c
  head/sys/kern/sched_ule.c
  head/sys/kern/subr_csan.c
  head/sys/kern/subr_prf.c
  head/sys/kern/subr_smp.c
  head/sys/kern/subr_witness.c
  head/sys/kern/tty_info.c
  head/sys/kern/vfs_bio.c
  head/sys/kern/vfs_subr.c
  head/sys/netinet/netdump/netdump_client.c
  head/sys/sparc64/sparc64/mp_machdep.c
  head/sys/sys/systm.h
  head/sys/x86/x86/local_apic.c
  head/sys/x86/x86/mp_x86.c

Modified: head/sys/arm/samsung/exynos/chrome_kb.c
==============================================================================
--- head/sys/arm/samsung/exynos/chrome_kb.c     Sun Jan 12 05:25:06 2020        (r356654)
+++ head/sys/arm/samsung/exynos/chrome_kb.c     Sun Jan 12 06:07:54 2020        (r356655)
@@ -81,7 +81,7 @@ __FBSDID("$FreeBSD$");
  */
 #define CKB_CTX_LOCK_ASSERT()                          \
         do {                                           \
-                if (!kdb_active && panicstr == NULL)   \
+                if (!kdb_active && !KERNEL_PANICKED()) \
                         mtx_assert(&Giant, MA_OWNED);  \
         } while (0)
 #else

Modified: head/sys/arm/versatile/pl050.c
==============================================================================
--- head/sys/arm/versatile/pl050.c      Sun Jan 12 05:25:06 2020        (r356654)
+++ head/sys/arm/versatile/pl050.c      Sun Jan 12 06:07:54 2020        (r356655)
@@ -77,7 +77,7 @@ __FBSDID("$FreeBSD$");
  */
 #define KMI_CTX_LOCK_ASSERT()                          \
         do {                                           \
-                if (!kdb_active && panicstr == NULL)   \
+                if (!kdb_active && !KERNEL_PANICKED()) \
                         mtx_assert(&Giant, MA_OWNED);  \
         } while (0)
 #else

Modified: head/sys/cddl/compat/opensolaris/sys/mutex.h
==============================================================================
--- head/sys/cddl/compat/opensolaris/sys/mutex.h        Sun Jan 12 05:25:06 2020        (r356654)
+++ head/sys/cddl/compat/opensolaris/sys/mutex.h        Sun Jan 12 06:07:54 2020        (r356655)
@@ -42,7 +42,7 @@ typedef enum {
 } kmutex_type_t;
 
 #define MUTEX_HELD(x)           (mutex_owned(x))
-#define MUTEX_NOT_HELD(x)       (!mutex_owned(x) || panicstr)
+#define MUTEX_NOT_HELD(x)       (!mutex_owned(x) || KERNEL_PANICKED())
 
 typedef struct sx       kmutex_t;

Modified: head/sys/cddl/contrib/opensolaris/uts/common/fs/zfs/zfs_ioctl.c
==============================================================================
--- head/sys/cddl/contrib/opensolaris/uts/common/fs/zfs/zfs_ioctl.c     Sun Jan 12 05:25:06 2020        (r356654)
+++ head/sys/cddl/contrib/opensolaris/uts/common/fs/zfs/zfs_ioctl.c     Sun Jan 12 06:07:54 2020        (r356655)
@@ -7296,7 +7296,7 @@ zfs_shutdown(void *arg __unused, int howto __unused)
         /*
          * ZFS fini routines can not properly work in a panic-ed system.
          */
-        if (panicstr == NULL)
+        if (!KERNEL_PANICKED())
                 (void)zfs__fini();
 }

Modified: head/sys/cddl/contrib/opensolaris/uts/common/fs/zfs/zfs_vfsops.c
==============================================================================
--- head/sys/cddl/contrib/opensolaris/uts/common/fs/zfs/zfs_vfsops.c    Sun Jan 12 05:25:06 2020        (r356654)
+++ head/sys/cddl/contrib/opensolaris/uts/common/fs/zfs/zfs_vfsops.c    Sun Jan 12 06:07:54 2020        (r356655)
@@ -293,7 +293,7 @@ zfs_sync(vfs_t *vfsp, int waitfor)
          * Data integrity is job one. We don't want a compromised kernel
          * writing to the storage pool, so we never sync during panic.
          */
-        if (panicstr)
+        if (KERNEL_PANICKED())
                 return (0);
 
         /*

Modified: head/sys/cddl/contrib/opensolaris/uts/common/os/fm.c
==============================================================================
--- head/sys/cddl/contrib/opensolaris/uts/common/os/fm.c        Sun Jan 12 05:25:06 2020        (r356654)
+++ head/sys/cddl/contrib/opensolaris/uts/common/os/fm.c        Sun Jan 12 06:07:54 2020        (r356655)
@@ -119,7 +119,7 @@ fm_drain(void *private, void *data, errorq_elem_t *eep
 {
         nvlist_t *nvl = errorq_elem_nvl(ereport_errorq, eep);
 
-        if (!panicstr)
+        if (!KERNEL_PANICKED())
                 (void) fm_ereport_post(nvl, EVCH_TRYHARD);
         else
                 fm_nvprint(nvl);
@@ -420,7 +420,7 @@ fm_banner(void)
         if (!fm_panicstr)
                 return; /* panic was not initiated by fm_panic(); do nothing */
 
-        if (panicstr) {
+        if (KERNEL_PANICKED()) {
                 tod = panic_hrestime;
                 now = panic_hrtime;
         } else {
@@ -472,7 +472,7 @@ fm_ereport_dump(void)
         char *buf;
         size_t len;
 
-        if (panicstr) {
+        if (KERNEL_PANICKED()) {
                 tod = panic_hrestime;
                 now = panic_hrtime;
         } else {
@@ -486,7 +486,7 @@ fm_ereport_dump(void)
          * In the panic case, sysevent_evc_walk_init() will return NULL.
          */
         if ((chq = sysevent_evc_walk_init(ereport_chan, NULL)) == NULL &&
-            !panicstr)
+            !KERNEL_PANICKED())
                 return; /* event channel isn't initialized yet */
 
         while ((sep = sysevent_evc_walk_step(chq)) != NULL) {

Modified: head/sys/ddb/db_textdump.c
==============================================================================
--- head/sys/ddb/db_textdump.c  Sun Jan 12 05:25:06 2020        (r356654)
+++ head/sys/ddb/db_textdump.c  Sun Jan 12 06:07:54 2020        (r356655)
@@ -479,7 +479,7 @@ textdump_dumpsys(struct dumperinfo *di)
 #endif
         if (textdump_do_msgbuf)
                 textdump_dump_msgbuf(di);
-        if (textdump_do_panic && panicstr != NULL)
+        if (textdump_do_panic && KERNEL_PANICKED())
                 textdump_dump_panic(di);
         if (textdump_do_version)
                 textdump_dump_version(di);

Modified: head/sys/dev/acpica/acpi.c
==============================================================================
--- head/sys/dev/acpica/acpi.c  Sun Jan 12 05:25:06 2020        (r356654)
+++ head/sys/dev/acpica/acpi.c  Sun Jan 12 06:07:54 2020        (r356655)
@@ -2166,7 +2166,7 @@ acpi_shutdown_final(void *arg, int howto)
         } else if (status != AE_NOT_EXIST)
                 device_printf(sc->acpi_dev, "reset failed - %s\n",
                     AcpiFormatException(status));
-    } else if (sc->acpi_do_disable && panicstr == NULL) {
+    } else if (sc->acpi_do_disable && !KERNEL_PANICKED()) {
         /*
          * Only disable ACPI if the user requested. On some systems, writing
          * the disable value to SMI_CMD hangs the system.
Modified: head/sys/dev/drm2/drm_fb_helper.c
==============================================================================
--- head/sys/dev/drm2/drm_fb_helper.c   Sun Jan 12 05:25:06 2020        (r356654)
+++ head/sys/dev/drm2/drm_fb_helper.c   Sun Jan 12 06:07:54 2020        (r356655)
@@ -74,7 +74,7 @@ vt_kms_postswitch(void *arg)
 
         sc = (struct vt_kms_softc *)arg;
 
-        if (!kdb_active && panicstr == NULL)
+        if (!kdb_active && !KERNEL_PANICKED())
                 taskqueue_enqueue(taskqueue_thread, &sc->fb_mode_task);
         else
                 drm_fb_helper_restore_fbdev_mode(sc->fb_helper);

Modified: head/sys/dev/iscsi/iscsi.c
==============================================================================
--- head/sys/dev/iscsi/iscsi.c  Sun Jan 12 05:25:06 2020        (r356654)
+++ head/sys/dev/iscsi/iscsi.c  Sun Jan 12 06:07:54 2020        (r356655)
@@ -2501,7 +2501,7 @@ static void
 iscsi_shutdown_post(struct iscsi_softc *sc)
 {
 
-        if (panicstr == NULL) {
+        if (!KERNEL_PANICKED()) {
                 ISCSI_DEBUG("removing all sessions due to shutdown");
                 iscsi_terminate_sessions(sc);
         }

Modified: head/sys/dev/mrsas/mrsas.c
==============================================================================
--- head/sys/dev/mrsas/mrsas.c  Sun Jan 12 05:25:06 2020        (r356654)
+++ head/sys/dev/mrsas/mrsas.c  Sun Jan 12 06:07:54 2020        (r356655)
@@ -1190,7 +1190,7 @@ mrsas_shutdown(device_t dev)
         sc = device_get_softc(dev);
         sc->remove_in_progress = 1;
 
-        if (panicstr == NULL) {
+        if (!KERNEL_PANICKED()) {
                 if (sc->ocr_thread_active)
                         wakeup(&sc->ocr_chan);
                 i = 0;

Modified: head/sys/dev/syscons/syscons.c
==============================================================================
--- head/sys/dev/syscons/syscons.c      Sun Jan 12 05:25:06 2020        (r356654)
+++ head/sys/dev/syscons/syscons.c      Sun Jan 12 06:07:54 2020        (r356655)
@@ -2163,7 +2163,7 @@ sccnupdate(scr_stat *scp)
         if (suspend_in_progress || scp->sc->font_loading_in_progress)
                 return;
 
-        if (kdb_active || panicstr || shutdown_in_progress) {
+        if (kdb_active || KERNEL_PANICKED() || shutdown_in_progress) {
                 sc_touch_scrn_saver();
         } else if (scp != scp->sc->cur_scp) {
                 return;
@@ -2229,7 +2229,7 @@ scrn_timer(void *arg)
         }
 
         /* should we stop the screen saver? */
-        if (kdb_active || panicstr || shutdown_in_progress)
+        if (kdb_active || KERNEL_PANICKED() || shutdown_in_progress)
                 sc_touch_scrn_saver();
         if (run_scrn_saver) {
                 if (time_uptime > sc->scrn_time_stamp + scrn_blank_time)

Modified: head/sys/dev/vt/vt_core.c
==============================================================================
--- head/sys/dev/vt/vt_core.c   Sun Jan 12 05:25:06 2020        (r356654)
+++ head/sys/dev/vt/vt_core.c   Sun Jan 12 06:07:54 2020        (r356655)
@@ -1301,7 +1301,7 @@ vt_flush(struct vt_device *vd)
         /* Check if the cursor should be displayed or not. */
         if ((vd->vd_flags & VDF_MOUSECURSOR) && /* Mouse support enabled. */
             !(vw->vw_flags & VWF_MOUSE_HIDE) && /* Cursor displayed.      */
-            !kdb_active && panicstr == NULL) {  /* DDB inactive.          */
+            !kdb_active && !KERNEL_PANICKED()) {  /* DDB inactive.        */
                 vd->vd_mshown = 1;
         } else {
                 vd->vd_mshown = 0;
@@ -1398,7 +1398,7 @@ vtterm_done(struct terminal *tm)
         struct vt_window *vw = tm->tm_softc;
         struct vt_device *vd = vw->vw_device;
 
-        if (kdb_active || panicstr != NULL) {
+        if (kdb_active || KERNEL_PANICKED()) {
                 /* Switch to the debugger. */
                 if (vd->vd_curwindow != vw) {
                         vd->vd_curwindow = vw;

Modified: head/sys/dev/xen/console/xen_console.c
==============================================================================
--- head/sys/dev/xen/console/xen_console.c      Sun Jan 12 05:25:06 2020        (r356654)
+++ head/sys/dev/xen/console/xen_console.c      Sun Jan 12 06:07:54 2020        (r356655)
@@ -199,7 +199,7 @@ xc_printf(const char *fmt, ...)
 static inline void xencons_lock(struct xencons_priv *cons)
 {
 
-        if (panicstr == NULL)
+        if (!KERNEL_PANICKED())
                 mtx_lock_spin(&cons->mtx);
 
 }
@@ -207,7 +207,7 @@ static inline void xencons_lock(struct xencons_priv *c
 static inline void xencons_unlock(struct xencons_priv *cons)
 {
 
-        if (panicstr == NULL)
+        if (!KERNEL_PANICKED())
                 mtx_unlock_spin(&cons->mtx);
 }

Modified: head/sys/gdb/netgdb.c
==============================================================================
--- head/sys/gdb/netgdb.c       Sun Jan 12 05:25:06 2020        (r356654)
+++ head/sys/gdb/netgdb.c       Sun Jan 12 06:07:54 2020        (r356655)
@@ -340,7 +340,7 @@ DB_FUNC(netgdb, db_netgdb_cmd, db_cmd_table, CS_OWN, N
         struct debugnet_pcb *pcb;
         int error;
 
-        if (panicstr == NULL) {
+        if (!KERNEL_PANICKED()) {
                 /* TODO: This limitation should be removed in future work. */
                 printf("%s: netgdb is currently limited to use only after a "
                     "panic. Sorry.\n", __func__);

Modified: head/sys/geom/journal/g_journal.c
==============================================================================
--- head/sys/geom/journal/g_journal.c   Sun Jan 12 05:25:06 2020        (r356654)
+++ head/sys/geom/journal/g_journal.c   Sun Jan 12 06:07:54 2020        (r356655)
@@ -2653,7 +2653,7 @@ g_journal_shutdown(void *arg, int howto __unused)
         struct g_class *mp;
         struct g_geom *gp, *gp2;
 
-        if (panicstr != NULL)
+        if (KERNEL_PANICKED())
                 return;
         mp = arg;
         g_topology_lock();

Modified: head/sys/geom/mirror/g_mirror.c
==============================================================================
--- head/sys/geom/mirror/g_mirror.c     Sun Jan 12 05:25:06 2020        (r356654)
+++ head/sys/geom/mirror/g_mirror.c     Sun Jan 12 06:07:54 2020        (r356655)
@@ -3481,7 +3481,7 @@ g_mirror_shutdown_post_sync(void *arg, int howto)
         struct g_mirror_softc *sc;
         int error;
 
-        if (panicstr != NULL)
+        if (KERNEL_PANICKED())
                 return;
 
         mp = arg;

Modified: head/sys/kern/kern_ktr.c
==============================================================================
--- head/sys/kern/kern_ktr.c    Sun Jan 12 05:25:06 2020        (r356654)
+++ head/sys/kern/kern_ktr.c    Sun Jan 12 06:07:54 2020        (r356655)
@@ -324,7 +324,7 @@ ktr_tracepoint(uint64_t mask, const char *file, int li
 #endif
         int cpu;
 
-        if (panicstr || kdb_active)
+        if (KERNEL_PANICKED() || kdb_active)
                 return;
         if ((ktr_mask & mask) == 0 || ktr_buf == NULL)
                 return;

Modified: head/sys/kern/kern_lock.c
==============================================================================
--- head/sys/kern/kern_lock.c   Sun Jan 12 05:25:06 2020        (r356654)
+++ head/sys/kern/kern_lock.c   Sun Jan 12 06:07:54 2020        (r356655)
@@ -558,7 +558,7 @@ lockmgr_slock_hard(struct lock *lk, u_int flags, struc
         int contested = 0;
 #endif
 
-        if (__predict_false(panicstr != NULL))
+        if (KERNEL_PANICKED())
                 goto out;
 
         tid = (uintptr_t)curthread;
@@ -700,7 +700,7 @@ lockmgr_xlock_hard(struct lock *lk, u_int flags, struc
         int contested = 0;
 #endif
 
-        if (__predict_false(panicstr != NULL))
+        if (KERNEL_PANICKED())
                 goto out;
 
         tid = (uintptr_t)curthread;
@@ -882,7 +882,7 @@ lockmgr_upgrade(struct lock *lk, u_int flags, struct l
         int wakeup_swapper = 0;
         int op;
 
-        if (__predict_false(panicstr != NULL))
+        if (KERNEL_PANICKED())
                 goto out;
 
         tid = (uintptr_t)curthread;
@@ -941,7 +941,7 @@ lockmgr_lock_fast_path(struct lock *lk, u_int flags, s
         u_int op;
         bool locked;
 
-        if (__predict_false(panicstr != NULL))
+        if (KERNEL_PANICKED())
                 return (0);
 
         op = flags & LK_TYPE_MASK;
@@ -1003,7 +1003,7 @@ lockmgr_sunlock_hard(struct lock *lk, uintptr_t x, u_i
 {
         int wakeup_swapper = 0;
 
-        if (__predict_false(panicstr != NULL))
+        if (KERNEL_PANICKED())
                 goto out;
 
         wakeup_swapper = wakeupshlk(lk, file, line);
@@ -1022,7 +1022,7 @@ lockmgr_xunlock_hard(struct lock *lk, uintptr_t x, u_i
         u_int realexslp;
         int queue;
 
-        if (__predict_false(panicstr != NULL))
+        if (KERNEL_PANICKED())
                 goto out;
 
         tid = (uintptr_t)curthread;
@@ -1126,7 +1126,7 @@ lockmgr_unlock_fast_path(struct lock *lk, u_int flags,
         const char *file;
         int line;
 
-        if (__predict_false(panicstr != NULL))
+        if (KERNEL_PANICKED())
                 return (0);
 
         file = __FILE__;
@@ -1254,7 +1254,7 @@ __lockmgr_args(struct lock *lk, u_int flags, struct lo
         int contested = 0;
 #endif
 
-        if (panicstr != NULL)
+        if (KERNEL_PANICKED())
                 return (0);
 
         error = 0;
@@ -1662,7 +1662,7 @@ _lockmgr_assert(const struct lock *lk, int what, const
 {
         int slocked = 0;
 
-        if (panicstr != NULL)
+        if (KERNEL_PANICKED())
                 return;
         switch (what) {
         case KA_SLOCKED:

Modified: head/sys/kern/kern_mutex.c
==============================================================================
--- head/sys/kern/kern_mutex.c  Sun Jan 12 05:25:06 2020        (r356654)
+++ head/sys/kern/kern_mutex.c  Sun Jan 12 06:07:54 2020        (r356655)
@@ -1071,7 +1071,7 @@ __mtx_assert(const volatile uintptr_t *c, int what, co
 {
         const struct mtx *m;
 
-        if (panicstr != NULL || dumping || SCHEDULER_STOPPED())
+        if (KERNEL_PANICKED() || dumping || SCHEDULER_STOPPED())
                 return;
         m = mtxlock2mtx(c);
@@ -1229,7 +1229,7 @@ _mtx_lock_indefinite_check(struct mtx *m, struct lock_
         struct thread *td;
 
         ldap->spin_cnt++;
-        if (ldap->spin_cnt < 60000000 || kdb_active || panicstr != NULL)
+        if (ldap->spin_cnt < 60000000 || kdb_active || KERNEL_PANICKED())
                 cpu_lock_delay();
         else {
                 td = mtx_owner(m);

Modified: head/sys/kern/kern_switch.c
==============================================================================
--- head/sys/kern/kern_switch.c Sun Jan 12 05:25:06 2020        (r356654)
+++ head/sys/kern/kern_switch.c Sun Jan 12 06:07:54 2020        (r356655)
@@ -181,7 +181,7 @@ choosethread(void)
 
         td = sched_choose();
 
-        if (__predict_false(panicstr != NULL))
+        if (KERNEL_PANICKED())
                 return (choosethread_panic(td));
 
         TD_SET_RUNNING(td);

Modified: head/sys/kern/kern_synch.c
==============================================================================
--- head/sys/kern/kern_synch.c  Sun Jan 12 05:25:06 2020        (r356654)
+++ head/sys/kern/kern_synch.c  Sun Jan 12 06:07:54 2020        (r356655)
@@ -480,8 +480,8 @@ mi_switch(int flags)
         if (!TD_ON_LOCK(td) && !TD_IS_RUNNING(td))
                 mtx_assert(&Giant, MA_NOTOWNED);
 #endif
-        KASSERT(td->td_critnest == 1 || panicstr,
-            ("mi_switch: switch in a critical section"));
+        KASSERT(td->td_critnest == 1 || KERNEL_PANICKED(),
+            ("mi_switch: switch in a critical section"));
         KASSERT((flags & (SW_INVOL | SW_VOL)) != 0,
             ("mi_switch: switch must be voluntary or involuntary"));

Modified: head/sys/kern/sched_4bsd.c
==============================================================================
--- head/sys/kern/sched_4bsd.c  Sun Jan 12 05:25:06 2020        (r356654)
+++ head/sys/kern/sched_4bsd.c  Sun Jan 12 06:07:54 2020        (r356655)
@@ -348,7 +348,7 @@ maybe_preempt(struct thread *td)
             ("maybe_preempt: trying to run inhibited thread"));
         pri = td->td_priority;
         cpri = ctd->td_priority;
-        if (panicstr != NULL || pri >= cpri /* || dumping */ ||
+        if (KERNEL_PANICKED() || pri >= cpri /* || dumping */ ||
             TD_IS_INHIBITED(ctd))
                 return (0);
 #ifndef FULL_PREEMPTION
@@ -1138,7 +1138,7 @@ forward_wakeup(int cpunum)
         if ((!forward_wakeup_enabled) ||
              (forward_wakeup_use_mask == 0 && forward_wakeup_use_loop == 0))
                 return (0);
-        if (!smp_started || panicstr)
+        if (!smp_started || KERNEL_PANICKED())
                 return (0);
 
         forward_wakeups_requested++;

Modified: head/sys/kern/sched_ule.c
==============================================================================
--- head/sys/kern/sched_ule.c   Sun Jan 12 05:25:06 2020        (r356654)
+++ head/sys/kern/sched_ule.c   Sun Jan 12 06:07:54 2020        (r356655)
@@ -2533,7 +2533,7 @@ sched_setpreempt(struct thread *td)
         cpri = ctd->td_priority;
         if (pri < cpri)
                 ctd->td_flags |= TDF_NEEDRESCHED;
-        if (panicstr != NULL || pri >= cpri || cold || TD_IS_INHIBITED(ctd))
+        if (KERNEL_PANICKED() || pri >= cpri || cold || TD_IS_INHIBITED(ctd))
                 return;
         if (!sched_shouldpreempt(pri, cpri, 0))
                 return;

Modified: head/sys/kern/subr_csan.c
==============================================================================
--- head/sys/kern/subr_csan.c   Sun Jan 12 05:25:06 2020        (r356654)
+++ head/sys/kern/subr_csan.c   Sun Jan 12 06:07:54 2020        (r356655)
@@ -153,7 +153,7 @@ kcsan_access(uintptr_t addr, size_t size, bool write,
                 return;
         if (__predict_false(kcsan_md_unsupported((vm_offset_t)addr)))
                 return;
-        if (__predict_false(panicstr != NULL))
+        if (KERNEL_PANICKED())
                 return;
 
         new.addr = addr;

Modified: head/sys/kern/subr_prf.c
==============================================================================
--- head/sys/kern/subr_prf.c    Sun Jan 12 05:25:06 2020        (r356654)
+++ head/sys/kern/subr_prf.c    Sun Jan 12 06:07:54 2020        (r356655)
@@ -409,7 +409,7 @@ vprintf(const char *fmt, va_list ap)
 
         retval = _vprintf(-1, TOCONS | TOLOG, fmt, ap);
 
-        if (!panicstr)
+        if (!KERNEL_PANICKED())
                 msgbuftrigger = 1;
 
         return (retval);
@@ -423,7 +423,7 @@ prf_putbuf(char *bufr, int flags, int pri)
                 msglogstr(bufr, pri, /*filter_cr*/1);
 
         if (flags & TOCONS) {
-                if ((panicstr == NULL) && (constty != NULL))
+                if ((!KERNEL_PANICKED()) && (constty != NULL))
                         msgbuf_addstr(&consmsgbuf, -1,
                             bufr, /*filter_cr*/ 0);
 
@@ -492,7 +492,7 @@ putchar(int c, void *arg)
                 return;
         }
 
-        if ((flags & TOTTY) && tp != NULL && panicstr == NULL)
+        if ((flags & TOTTY) && tp != NULL && !KERNEL_PANICKED())
                 tty_putchar(tp, c);
 
         if ((flags & (TOCONS | TOLOG)) && c != '\0')

Modified: head/sys/kern/subr_smp.c
==============================================================================
--- head/sys/kern/subr_smp.c    Sun Jan 12 05:25:06 2020        (r356654)
+++ head/sys/kern/subr_smp.c    Sun Jan 12 06:07:54 2020        (r356655)
@@ -195,7 +195,7 @@ forward_signal(struct thread *td)
 
         CTR1(KTR_SMP, "forward_signal(%p)", td->td_proc);
 
-        if (!smp_started || cold || panicstr)
+        if (!smp_started || cold || KERNEL_PANICKED())
                 return;
         if (!forward_signal_enabled)
                 return;

Modified: head/sys/kern/subr_witness.c
==============================================================================
--- head/sys/kern/subr_witness.c        Sun Jan 12 05:25:06 2020        (r356654)
+++ head/sys/kern/subr_witness.c        Sun Jan 12 06:07:54 2020        (r356655)
@@ -899,7 +899,7 @@ witness_init(struct lock_object *lock, const char *typ
          * it to the pending_locks list.  If it is not too early, then enroll
          * the lock now.
          */
-        if (witness_watch < 1 || panicstr != NULL ||
+        if (witness_watch < 1 || KERNEL_PANICKED() ||
             (lock->lo_flags & LO_WITNESS) == 0)
                 lock->lo_witness = NULL;
         else if (witness_cold) {
@@ -1077,7 +1077,7 @@ int
 witness_defineorder(struct lock_object *lock1, struct lock_object *lock2)
 {
 
-        if (witness_watch == -1 || panicstr != NULL)
+        if (witness_watch == -1 || KERNEL_PANICKED())
                 return (0);
 
         /* Require locks that witness knows about. */
@@ -1118,7 +1118,7 @@ witness_checkorder(struct lock_object *lock, int flags
         int i, j;
 
         if (witness_cold || witness_watch < 1 || lock->lo_witness == NULL ||
-            panicstr != NULL)
+            KERNEL_PANICKED())
                 return;
 
         w = lock->lo_witness;
@@ -1464,7 +1464,7 @@ witness_lock(struct lock_object *lock, int flags, cons
         struct thread *td;
 
         if (witness_cold || witness_watch == -1 || lock->lo_witness == NULL ||
-            panicstr != NULL)
+            KERNEL_PANICKED())
                 return;
         w = lock->lo_witness;
         td = curthread;
@@ -1522,7 +1522,7 @@ witness_upgrade(struct lock_object *lock, int flags, c
         struct lock_class *class;
 
         KASSERT(witness_cold == 0, ("%s: witness_cold", __func__));
-        if (lock->lo_witness == NULL || witness_watch == -1 || panicstr != NULL)
+        if (lock->lo_witness == NULL || witness_watch == -1 || KERNEL_PANICKED())
                 return;
         class = LOCK_CLASS(lock);
         if (witness_watch) {
@@ -1568,7 +1568,7 @@ witness_downgrade(struct lock_object *lock, int flags,
         struct lock_class *class;
 
         KASSERT(witness_cold == 0, ("%s: witness_cold", __func__));
-        if (lock->lo_witness == NULL || witness_watch == -1 || panicstr != NULL)
+        if (lock->lo_witness == NULL || witness_watch == -1 || KERNEL_PANICKED())
                 return;
         class = LOCK_CLASS(lock);
         if (witness_watch) {
@@ -1616,7 +1616,7 @@ witness_unlock(struct lock_object *lock, int flags, co
         register_t s;
         int i, j;
 
-        if (witness_cold || lock->lo_witness == NULL || panicstr != NULL)
+        if (witness_cold || lock->lo_witness == NULL || KERNEL_PANICKED())
                 return;
         td = curthread;
         class = LOCK_CLASS(lock);
@@ -1722,7 +1722,7 @@ witness_thread_exit(struct thread *td)
         int i, n;
 
         lle = td->td_sleeplocks;
-        if (lle == NULL || panicstr != NULL)
+        if (lle == NULL || KERNEL_PANICKED())
                 return;
         if (lle->ll_count != 0) {
                 for (n = 0; lle != NULL; lle = lle->ll_next)
@@ -1757,7 +1757,7 @@ witness_warn(int flags, struct lock_object *lock, cons
         va_list ap;
         int i, n;
 
-        if (witness_cold || witness_watch < 1 || panicstr != NULL)
+        if (witness_cold || witness_watch < 1 || KERNEL_PANICKED())
                 return (0);
         n = 0;
         td = curthread;
@@ -1849,7 +1849,7 @@ enroll(const char *description, struct lock_class *loc
 
         MPASS(description != NULL);
 
-        if (witness_watch == -1 || panicstr != NULL)
+        if (witness_watch == -1 || KERNEL_PANICKED())
                 return (NULL);
         if ((lock_class->lc_flags & LC_SPINLOCK)) {
                 if (witness_skipspin)
@@ -2323,7 +2323,7 @@ witness_save(struct lock_object *lock, const char **fi
         if (SCHEDULER_STOPPED())
                 return;
         KASSERT(witness_cold == 0, ("%s: witness_cold", __func__));
-        if (lock->lo_witness == NULL || witness_watch == -1 || panicstr != NULL)
+        if (lock->lo_witness == NULL || witness_watch == -1 || KERNEL_PANICKED())
                 return;
         class = LOCK_CLASS(lock);
         if (class->lc_flags & LC_SLEEPLOCK)
@@ -2358,7 +2358,7 @@ witness_restore(struct lock_object *lock, const char *
         if (SCHEDULER_STOPPED())
                 return;
         KASSERT(witness_cold == 0, ("%s: witness_cold", __func__));
-        if (lock->lo_witness == NULL || witness_watch == -1 || panicstr != NULL)
+        if (lock->lo_witness == NULL || witness_watch == -1 || KERNEL_PANICKED())
                 return;
         class = LOCK_CLASS(lock);
         if (class->lc_flags & LC_SLEEPLOCK)
@@ -2388,7 +2388,7 @@ witness_assert(const struct lock_object *lock, int fla
         struct lock_instance *instance;
         struct lock_class *class;
 
-        if (lock->lo_witness == NULL || witness_watch < 1 || panicstr != NULL)
+        if (lock->lo_witness == NULL || witness_watch < 1 || KERNEL_PANICKED())
                 return;
         class = LOCK_CLASS(lock);
         if ((class->lc_flags & LC_SLEEPLOCK) != 0)
@@ -2460,7 +2460,7 @@ witness_setflag(struct lock_object *lock, int flag, in
         struct lock_instance *instance;
         struct lock_class *class;
 
-        if (lock->lo_witness == NULL || witness_watch == -1 || panicstr != NULL)
+        if (lock->lo_witness == NULL || witness_watch == -1 || KERNEL_PANICKED())
                 return;
         class = LOCK_CLASS(lock);
         if (class->lc_flags & LC_SLEEPLOCK)

Modified: head/sys/kern/tty_info.c
==============================================================================
--- head/sys/kern/tty_info.c    Sun Jan 12 05:25:06 2020        (r356654)
+++ head/sys/kern/tty_info.c    Sun Jan 12 06:07:54 2020        (r356655)
@@ -229,7 +229,7 @@ sbuf_tty_drain(void *a, const char *d, int len)
                 cnputsn(d, len);
                 return (len);
         }
-        if (tp != NULL && panicstr == NULL) {
+        if (tp != NULL && !KERNEL_PANICKED()) {
                 rc = tty_putstrn(tp, d, len);
                 if (rc != 0)
                         return (-ENXIO);

Modified: head/sys/kern/vfs_bio.c
==============================================================================
--- head/sys/kern/vfs_bio.c     Sun Jan 12 05:25:06 2020        (r356654)
+++ head/sys/kern/vfs_bio.c     Sun Jan 12 06:07:54 2020        (r356655)
@@ -1431,7 +1431,7 @@ bufshutdown(int show_busybufs)
                 /*
                  * Unmount filesystems
                  */
-                if (panicstr == NULL)
+                if (!KERNEL_PANICKED())
                         vfs_unmountall();
         }
         swapoff_all();

Modified: head/sys/kern/vfs_subr.c
==============================================================================
--- head/sys/kern/vfs_subr.c    Sun Jan 12 05:25:06 2020        (r356654)
+++ head/sys/kern/vfs_subr.c    Sun Jan 12 06:07:54 2020        (r356655)
@@ -5023,7 +5023,7 @@ extattr_check_cred(struct vnode *vp, int attrnamespace
 * This only exists to suppress warnings from unlocked specfs accesses.  It is
 * no longer ok to have an unlocked VFS.
 */
-#define IGNORE_LOCK(vp) (panicstr != NULL || (vp) == NULL ||            \
+#define IGNORE_LOCK(vp) (KERNEL_PANICKED() || (vp) == NULL ||           \
         (vp)->v_type == VCHR || (vp)->v_type == VBAD)
 
 int vfs_badlock_ddb = 1;        /* Drop into debugger on violation. */
@@ -5172,7 +5172,7 @@ vop_strategy_pre(void *ap)
         if ((bp->b_flags & B_CLUSTER) != 0)
                 return;
 
-        if (panicstr == NULL && !BUF_ISLOCKED(bp)) {
+        if (!KERNEL_PANICKED() && !BUF_ISLOCKED(bp)) {
                 if (vfs_badlock_print)
                         printf(
                             "VOP_STRATEGY: bp is not locked but should be\n");

Modified: head/sys/netinet/netdump/netdump_client.c
==============================================================================
--- head/sys/netinet/netdump/netdump_client.c   Sun Jan 12 05:25:06 2020        (r356654)
+++ head/sys/netinet/netdump/netdump_client.c   Sun Jan 12 06:07:54 2020        (r356655)
@@ -293,7 +293,7 @@ netdump_start(struct dumperinfo *di)
 
         if (!netdump_enabled())
                 return (EINVAL);
-        if (panicstr == NULL) {
+        if (!KERNEL_PANICKED()) {
                 printf(
                     "netdump_start: netdump may only be used after a panic\n");
                 return (EINVAL);

Modified: head/sys/sparc64/sparc64/mp_machdep.c
==============================================================================
--- head/sys/sparc64/sparc64/mp_machdep.c       Sun Jan 12 05:25:06 2020        (r356654)
+++ head/sys/sparc64/sparc64/mp_machdep.c       Sun Jan 12 06:07:54 2020        (r356655)
@@ -611,7 +611,7 @@ spitfire_ipi_single(u_int cpu, u_long d0, u_long d1, u
                 if ((ids & (IDR_BUSY | IDR_NACK)) == 0)
                         return;
         }
-        if (kdb_active != 0 || panicstr != NULL)
+        if (kdb_active != 0 || KERNEL_PANICKED())
                 printf("%s: couldn't send IPI to module 0x%u\n",
                     __func__, mid);
         else
@@ -650,7 +650,7 @@ cheetah_ipi_single(u_int cpu, u_long d0, u_long d1, u_
                 if ((ids & (IDR_BUSY | IDR_NACK)) == 0)
                         return;
         }
-        if (kdb_active != 0 || panicstr != NULL)
+        if (kdb_active != 0 || KERNEL_PANICKED())
                 printf("%s: couldn't send IPI to module 0x%u\n",
                     __func__, mid);
         else
@@ -709,7 +709,7 @@ cheetah_ipi_selected(cpuset_t cpus, u_long d0, u_long
                 if (CPU_EMPTY(&cpus))
                         return;
         }
-        if (kdb_active != 0 || panicstr != NULL)
+        if (kdb_active != 0 || KERNEL_PANICKED())
                 printf("%s: couldn't send IPI (cpus=%s ids=0x%lu)\n",
                     __func__, cpusetobj_strprint(ipi_pbuf, &cpus), ids);
         else
@@ -750,7 +750,7 @@ jalapeno_ipi_single(u_int cpu, u_long d0, u_long d1, u
                 if ((ids & busynack) == 0)
                         return;
         }
-        if (kdb_active != 0 || panicstr != NULL)
+        if (kdb_active != 0 || KERNEL_PANICKED())
                 printf("%s: couldn't send IPI to module 0x%u\n",
                     __func__, mid);
         else
@@ -801,7 +801,7 @@ jalapeno_ipi_selected(cpuset_t cpus, u_long d0, u_long
                     (2 * cpuid_to_mid[cpu]))) == 0)
                         CPU_CLR(cpu, &cpus);
         }
-        if (kdb_active != 0 || panicstr != NULL)
+        if (kdb_active != 0 || KERNEL_PANICKED())
                 printf("%s: couldn't send IPI (cpus=%s ids=0x%lu)\n",
                     __func__, cpusetobj_strprint(ipi_pbuf, &cpus), ids);
         else

Modified: head/sys/sys/systm.h
==============================================================================
--- head/sys/sys/systm.h        Sun Jan 12 05:25:06 2020        (r356654)
+++ head/sys/sys/systm.h        Sun Jan 12 06:07:54 2020        (r356655)
@@ -53,6 +53,7 @@ extern int cold;               /* nonzero if we are doing a cold bo
 extern int suspend_blocked;    /* block suspend due to pending shutdown */
 extern int rebooting;          /* kern_reboot() has been called. */
 extern const char *panicstr;   /* panic message */
+#define KERNEL_PANICKED()      __predict_false(panicstr != NULL)
 extern char version[];         /* system version */
 extern char compiler_version[];        /* compiler version */
 extern char copyright[];       /* system copyright */

Modified: head/sys/x86/x86/local_apic.c
==============================================================================
--- head/sys/x86/x86/local_apic.c       Sun Jan 12 05:25:06 2020        (r356654)
+++ head/sys/x86/x86/local_apic.c       Sun Jan 12 06:07:54 2020        (r356655)
@@ -2076,7 +2076,7 @@ native_lapic_ipi_vectored(u_int vector, int dest)
 
         /* Wait for an earlier IPI to finish. */
         if (!lapic_ipi_wait(BEFORE_SPIN)) {
-                if (panicstr != NULL)
+                if (KERNEL_PANICKED())
                         return;
                 else
                         panic("APIC: Previous IPI is stuck");

Modified: head/sys/x86/x86/mp_x86.c
==============================================================================
--- head/sys/x86/x86/mp_x86.c   Sun Jan 12 05:25:06 2020        (r356654)
+++ head/sys/x86/x86/mp_x86.c   Sun Jan 12 06:07:54 2020        (r356655)
@@ -1459,7 +1459,7 @@ cpustop_handler(void)
          * again, and might as well save power / release resources
          * (e.g., overprovisioned VM infrastructure).
          */
-        while (__predict_false(!IS_BSP() && panicstr != NULL))
+        while (__predict_false(!IS_BSP() && KERNEL_PANICKED()))
                 halt();
 }
 
@@ -1672,7 +1672,7 @@ smp_targeted_tlb_shootdown(cpuset_t mask, u_int vector
         int cpu;
 
         /* It is not necessary to signal other CPUs while in the debugger. */
-        if (kdb_active || panicstr != NULL)
+        if (kdb_active || KERNEL_PANICKED())
                 return;
 
         /*
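
A minimal usage sketch of the pattern this commit establishes follows; the handler below is hypothetical and not a function from the tree. KERNEL_PANICKED() wraps the panicstr test, and the __predict_false() branch hint now comes with the macro instead of being open-coded at each call site.

        /* Hypothetical example, not part of the commit above. */
        #include <sys/param.h>
        #include <sys/systm.h>

        static void
        example_shutdown_handler(void *arg, int howto)
        {

                /* Skip normal teardown once the kernel has panicked. */
                if (KERNEL_PANICKED())
                        return;
                /* Regular shutdown work would go here. */
        }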