Date:      Thu, 18 Aug 2016 09:11:26 +0000
From:      bugzilla-noreply@freebsd.org
To:        freebsd-bugs@FreeBSD.org
Subject:   [Bug 211959] vfs: kernel panic
Message-ID:  <bug-211959-8@https.bugs.freebsd.org/bugzilla/>

https://bugs.freebsd.org/bugzilla/show_bug.cgi?id=211959

            Bug ID: 211959
           Summary: vfs: kernel panic
           Product: Base System
           Version: 11.0-RC1
          Hardware: Any
                OS: Any
            Status: New
          Severity: Affects Only Me
          Priority: ---
         Component: kern
          Assignee: freebsd-bugs@FreeBSD.org
          Reporter: pkubaj@anongoth.pl

I get a kernel panic when moving files from my phone to the HDD.

It happens after the upgrade from 10.3-RELEASE to 11.0-RC1. It seems to be
related to the VFS fixes that were committed recently. Unfortunately, I didn't
use 11.0 before those fixes, so I can't say for sure.

(kgdb) bt
#0  doadump (textdump=<value optimized out>) at pcpu.h:221
#1  0xffffffff80549819 in kern_reboot (howto=260) at
/usr/src/sys/kern/kern_shutdown.c:366
#2  0xffffffff80549dcb in vpanic (fmt=<value optimized out>, ap=<value
optimized out>) at /usr/src/sys/kern/kern_shutdown.c:759
#3  0xffffffff80549c03 in panic (fmt=0x0) at
/usr/src/sys/kern/kern_shutdown.c:690
#4  0xffffffff808a2691 in trap_fatal (frame=0xfffffe04697f7750, eva=0) at
/usr/src/sys/amd64/amd64/trap.c:841
#5  0xffffffff808a2320 in trap (frame=0xfffffe04697f7750) at
/usr/src/sys/amd64/amd64/trap.c:203
#6  0xffffffff80887041 in calltrap () at
/usr/src/sys/amd64/amd64/exception.S:236
#7  0xffffffff804bfd1c in g_vfs_strategy (bo=0xfffff80188ae7658,
bp=0xfffffe03e2887370) at /usr/src/sys/geom/geom_vfs.c:173
#8  0xffffffff805f6e89 in bufwrite (bp=0xfffffe03e2887370) at buf.h:405
#9  0xffffffff80606040 in vop_stdfsync (ap=0xfffffe04697f79a8) at
/usr/src/sys/kern/vfs_default.c:695
#10 0xffffffff80442d06 in devfs_fsync (ap=0xfffffe04697f79a8) at
/usr/src/sys/fs/devfs/devfs_vnops.c:702
#11 0xffffffff808f6e7d in VOP_FSYNC_APV (vop=<value optimized out>, a=<value
optimized out>) at vnode_if.c:1331
#12 0xffffffff8061e66e in sched_sync () at vnode_if.h:549
#13 0xffffffff8050a275 in fork_exit (callout=0xffffffff8061e2b0 <sched_sync>,
arg=0x0, frame=0xfffffe04697f7a40) at /usr/src/sys/kern/kern_fork.c:1038
#14 0xffffffff8088750e in fork_trampoline () at
/usr/src/sys/amd64/amd64/exception.S:611
#15 0x0000000000000000 in ?? ()
(kgdb) up 7
#7  0xffffffff804bfd1c in g_vfs_strategy (bo=0xfffff80188ae7658,
bp=0xfffffe03e2887370) at /usr/src/sys/geom/geom_vfs.c:173
173             mtx_lock(&sc->sc_mtx);
(kgdb) list
168             sc = cp->geom->softc;
169
170             /*
171              * If the provider has orphaned us, just return EXIO.
172              */
173             mtx_lock(&sc->sc_mtx);
174             if (sc->sc_orphaned) {
175                     mtx_unlock(&sc->sc_mtx);
176                     bp->b_error = ENXIO;
177                     bp->b_ioflags |= BIO_ERROR;
(kgdb) print bo
$1 = (struct bufobj *) 0xfffff80188ae7658
(kgdb) print *bo
$2 = {bo_lock = {lock_object = {lo_name = 0xffffffff80969cd9 "bufobj
interlock", lo_flags = 86179840, lo_data = 0, lo_witness = 0x0}, rw_lock = 1},
bo_ops = 0xffffffff80bf1c08, bo_object = 0xfffff8033d02dd68, bo_synclist =
{le_next = 0x0, le_prev = 0xfffff800084b0670},
  bo_private = 0xfffff80188ae7588, __bo_vnode = 0xfffff80188ae7588, bo_clean =
{bv_hd = {tqh_first = 0xfffffe03e2886f10, tqh_last = 0xfffffe03e2962ab0},
bv_root = {pt_root = 18446735277816184480}, bv_cnt = 1501}, bo_dirty = {bv_hd =
{tqh_first = 0x0,
      tqh_last = 0xfffff80188ae76c8}, bv_root = {pt_root = 0}, bv_cnt = 0},
bo_numoutput = 1, bo_flag = 0, bo_bsize = 512}
(kgdb) print *bp
$3 = {b_bufobj = 0xfffff80188ae7658, b_bcount = 4096, b_caller1 = 0x0, b_data =
0xfffffe03e6398000 "���\017���\017���\017���\017���\017", b_error = 0, b_iocmd
= 2, b_ioflags = 2, b_iooffset = 16384, b_resid = 0, b_iodone = 0, b_blkno =
32, b_offset = 16384, b_bobufs = {
    tqe_next = 0xfffffe03e2892040, tqe_prev = 0xfffffe03e2887190}, b_vflags =
1, b_qindex = 2, b_flags = 2684354596, b_xflags = 2 '\002', b_lock =
{lock_object = {lo_name = 0xffffffff80967645 "bufwait", lo_flags = 108199936,
lo_data = 0, lo_witness = 0x0},
    lk_lock = 18446744073709551600, lk_exslpfail = 0, lk_timo = 0, lk_pri =
96}, b_bufsize = 4096, b_runningbufspace = 4096, b_kvasize = 16384, b_dirtyoff
= 0, b_dirtyend = 0, b_kvabase = 0xfffffe03e6398000
"���\017���\017���\017���\017���\017", b_lblkno = 32,
  b_vp = 0xfffff80188ae7588, b_rcred = 0x0, b_wcred = 0x0, {b_freelist =
{tqe_next = 0xfffffe03e2959140, tqe_prev = 0xffffffff80e67aa0}, {b_pgiodone =
0xfffffe03e2959140, b_pgbefore = -2132378976, b_pgafter = -1}}, b_cluster =
{cluster_head = {
      tqh_first = 0xfffffe03e2887140, tqh_last = 0xfffffe03e2822ec0},
cluster_entry = {tqe_next = 0xfffffe03e2887140, tqe_prev =
0xfffffe03e2822ec0}}, b_pages = 0xfffffe03e2887470, b_npages = 1, b_dep =
{lh_first = 0x0}, b_fsprivate1 = 0x0, b_fsprivate2 = 0x0,
  b_fsprivate3 = 0x0, b_pin_count = 0}
(kgdb) print sc
$4 = (struct g_vfs_softc *) 0x2e776f6c6c610065
(kgdb) print *sc
Cannot access memory at address 0x2e776f6c6c610065
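
One thing I noticed: the garbage sc pointer (0x2e776f6c6c610065) is mostly
printable ASCII bytes, which to me looks like the g_vfs softc was already freed
and its memory reused for string data (use-after-free) by the time the syncer's
bufwrite() reached g_vfs_strategy(). Here is a small userland sketch (just my
own illustration, not kernel code) that dumps the value byte-by-byte as it sits
in memory on amd64:

#include <stdio.h>
#include <stdint.h>
#include <string.h>

int
main(void)
{
        /* The garbage value kgdb printed for sc (cp->geom->softc). */
        uint64_t sc = 0x2e776f6c6c610065ULL;
        unsigned char b[sizeof(sc)];
        size_t i;

        /* amd64 is little-endian, so b[] holds the bytes as laid out in RAM. */
        memcpy(b, &sc, sizeof(b));
        for (i = 0; i < sizeof(b); i++)
                printf("%02x %c\n", (unsigned)b[i],
                    (b[i] >= 0x20 && b[i] < 0x7f) ? b[i] : '.');
        return (0);
}

This prints 65 'e', 00, 61 'a', 6c 'l', 6c 'l', 6f 'o', 77 'w', 2e '.', so the
freed memory apparently now holds text rather than a struct g_vfs_softc.
That is only my reading of the dump, of course.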


If there's anything else I can check, please ask.

-- 
You are receiving this mail because:
You are the assignee for the bug.


