Date: Sat, 01 Dec 2012 01:34:04 -0800 (PST)
From: sig6247 <sig6247@gmail.com>
To: Konstantin Belousov <kostikbel@gmail.com>
Cc: freebsd-current@freebsd.org, fs@freebsd.org
Subject: Re: clang compiled kernel panic when mounting zfs root on i386
Message-ID: <50b9cf0c.0fd9650a.5bbf.ffffb9b3@mx.google.com>
In-Reply-To: <20121130164715.GW3013@kib.kiev.ua> (Konstantin Belousov's message of "Fri, 30 Nov 2012 18:47:15 +0200")
References: <50b37d46.8584440a.735c.ffffb4e6@mx.google.com> <20121126171658.GD3013@kib.kiev.ua> <20121127071243.D1255@besplex.bde.org> <20121129232944.GQ3013@kib.kiev.ua> <50b8a9c5.e64dec0a.1d88.133a@mx.google.com> <20121130164715.GW3013@kib.kiev.ua>
On Fri, 30 Nov 2012 18:47:15 +0200, Konstantin Belousov <kostikbel@gmail.com> wrote:

> Hm, this is not very useful. Although the panic is again caused by the stack
> overflow, most likely (please also include the output of the "show thread"
> from ddb), it is at different place, and probably at the leaf function.
>
> Can you try some more times, so that we could see 'big' backtrace ?

Sure. Thanks.

WARNING: WITNESS option enabled, expect reduced performance.
Trying to mount root from zfs:zroot []...

Fatal double fault:
eip = 0xc0add15d
esp = 0xc86bffc8
ebp = 0xc86c003c
cpuid = 1; apic id = 01
panic: double fault
cpuid = 1
KDB: enter: panic
[ thread pid 1 tid 100002 ]
Stopped at      kdb_enter+0x3d: movl    $0,kdb_why
db> bt
Tracing pid 1 tid 100002 td 0xc89efbc0
kdb_enter(c1065960,c1065960,c10b903b,c139f438,2243cdbd,...) at kdb_enter+0x3d/frame 0xc139f3f0
panic(c10b903b,1,1,1,c86c003c,...) at panic+0x14b/frame 0xc139f42c
dblfault_handler() at dblfault_handler+0xab/frame 0xc139f42c
--- trap 0x17, eip = 0xc0add15d, esp = 0xc86bffc8, ebp = 0xc86c003c ---
witness_checkorder(c1fd7508,9,c109ee8c,7fa,0,...) at witness_checkorder+0x37d/frame 0xc86c003c
__mtx_lock_flags(c1fd7518,0,c109ee8c,7fa,c135e998,...) at __mtx_lock_flags+0x87/frame 0xc86c0070
uma_zalloc_arg(c1fd66c0,0,1,4d3,c86c0110,...) at uma_zalloc_arg+0x605/frame 0xc86c00c8
vm_map_insert(c1fd508c,c13e0ca0,bd3a000,0,cbc39000,...) at vm_map_insert+0x499/frame 0xc86c0130
kmem_back(c1fd508c,cbc39000,1000,3,c86c01d4,...) at kmem_back+0x76/frame 0xc86c018c
kmem_malloc(c1fd508c,1000,3) at kmem_malloc+0x250/frame 0xc86c01c0
page_alloc(c1fd1d80,1000,c86c020b,3,c1fd1d80,...) at page_alloc+0x27/frame 0xc86c01d4
keg_alloc_slab(103,4,c109ee8c,870,cbb95f6c,...) at keg_alloc_slab+0xc3/frame 0xc86c0218
keg_fetch_slab(103,c1fd1d80,cbb95f6c,c1fc8230,c86c02c0,...) at keg_fetch_slab+0xe2/frame 0xc86c0250
zone_fetch_slab(c1fd1d80,c1fd0480,103,826,0,...) at zone_fetch_slab+0x43/frame 0xc86c0268
uma_zalloc_arg(c1fd1d80,0,102,3,2,...) at uma_zalloc_arg+0x3f2/frame 0xc86c02c0
malloc(4c,c1826100,102,c86c0388,c173909a,...) at malloc+0xe9/frame 0xc86c02e8
zfs_kmem_alloc(4c,102,cb7d8820,c89efbc0,cb7d8820,...) at zfs_kmem_alloc+0x20/frame 0xc86c02fc
vdev_mirror_io_start(cba232e0,10,cba232e0,1,0,...) at vdev_mirror_io_start+0x14a/frame 0xc86c0388
zio_vdev_io_start(cba232e0,c89efbc0,0,cba232e0,c86c0600,...) at zio_vdev_io_start+0x228/frame 0xc86c03e4
zio_execute(cba232e0,cb7d8000,cbbec640,cbbe2000,600,...) at zio_execute+0x106/frame 0xc86c0418
spa_load_verify_cb(cb7d8000,0,cbbec640,cba6bd20,c86c0600,...) at spa_load_verify_cb+0x89/frame 0xc86c0458
traverse_visitbp(cba6bd20,cbbec640,c86c0600,c86c0ba0,0,...) at traverse_visitbp+0x29f/frame 0xc86c05e0
traverse_dnode(cba6bd20,0,0,23,0,...) at traverse_dnode+0x92/frame 0xc86c0638
traverse_visitbp(cba6bd98,cbbf0080,c86c0890,cba6bdd4,c16ca7e0,...) at traverse_visitbp+0xe47/frame 0xc86c07c0
traverse_visitbp(cba6bdd4,cbbe2840,c86c0968,c86c0ba0,0,...) at traverse_visitbp+0xf32/frame 0xc86c0948
traverse_dnode(cba6bdd4,0,0,0,0,...) at traverse_dnode+0x92/frame 0xc86c09a0
traverse_visitbp(0,cb7d8398,c86c0b50,2,cbbdc214,...) at traverse_visitbp+0x96d/frame 0xc86c0b28
traverse_impl(0,0,cb7d8398,74,0,...) at traverse_impl+0x268/frame 0xc86c0be0
traverse_pool(cb7d8000,74,0,d,c1723830,...) at traverse_pool+0x79/frame 0xc86c0c88
spa_load(0,1,c86c0ec4,1e,0,...) at spa_load+0x1dde/frame 0xc86c0df0
spa_load(0,0,c13d9d14,1,3,...) at spa_load+0x11a5/frame 0xc86c0f58
spa_load_best(0,ffffffff,ffffffff,1,c0add175,...) at spa_load_best+0x71/frame 0xc86c0fb0
spa_open_common(c17dce4e,0,0,c86c1190,c16f1a1c,...) at spa_open_common+0x11a/frame 0xc86c100c
spa_open(c86c1078,c86c1074,c17dce4e,c135e998,c1fd7798,...) at spa_open+0x27/frame 0xc86c1020
dsl_dir_open_spa(0,c89770b0,c17dd1e1,c86c11f8,c86c11f4,...) at dsl_dir_open_spa+0x6c/frame 0xc86c1190
dsl_dataset_hold(c89770b0,cb7d3800,c86c1240,cb7d3800,cb7d3800,...) at dsl_dataset_hold+0x3a/frame 0xc86c120c
dsl_dataset_own(c89770b0,0,cb7d3800,c86c1240,c1824e30,...) at dsl_dataset_own+0x21/frame 0xc86c1228
dmu_objset_own(c89770b0,2,1,cb7d3800,c86c1290,...) at dmu_objset_own+0x2a/frame 0xc86c1250
zfsvfs_create(c89770b0,c86c13ac,c17ea09b,681,0,...) at zfsvfs_create+0x4c/frame 0xc86c12a8
zfs_mount(cb99b540,c17f0160,cb98b100,c89cae80,0,...) at zfs_mount+0x42c/frame 0xc86c14e0
vfs_donmount(c89efbc0,4000,0,c86c1790,cb98b180,...) at vfs_donmount+0xc6d/frame 0xc86c1778
kernel_mount(c8977490,4000,0,0,1,...) at kernel_mount+0x6b/frame 0xc86c17b8
parse_mount(cb96e0e0,c1195498,0,1,0,...) at parse_mount+0x606/frame 0xc86c19d8
vfs_mountroot(c13da634,4,c105ceba,2bb,0,...) at vfs_mountroot+0x6cf/frame 0xc86c1c60
start_init(0,c86c1d08,c105f7c4,3db,0,...) at start_init+0x6a/frame 0xc86c1ccc
fork_exit(c0a429e0,0,c86c1d08) at fork_exit+0x7f/frame 0xc86c1cf4
fork_trampoline() at fork_trampoline+0x8/frame 0xc86c1cf4
--- trap 0, eip = 0, esp = 0xc86c1d40, ebp = 0 ---
db> show thread
Thread 100002 at 0xc89efbc0:
 proc (pid 1): 0xc89edb40
 name: kernel
 stack: 0xc86c0000-0xc86c1fff
 flags: 0x4  pflags: 0x10000
 state: RUNNING (CPU 1)
 priority: 84
 container lock: sched lock 1 (0xc1220000)
db>
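
For what it's worth, the ddb output above seems to line up with the stack-overflow theory: "show thread" reports the kernel stack as 0xc86c0000-0xc86c1fff (8 KB), while the esp at the double fault is 0xc86bffc8, just below the bottom of that range. A minimal userland sketch of the arithmetic follows; the addresses are copied verbatim from the output above, and the program and its variable names are purely illustrative, not kernel code:

#include <stdio.h>

int
main(void)
{
        /* Values taken from the ddb output above. */
        unsigned long stack_lo  = 0xc86c0000UL; /* "stack: 0xc86c0000-0xc86c1fff" */
        unsigned long stack_hi  = 0xc86c1fffUL;
        unsigned long fault_esp = 0xc86bffc8UL; /* "esp = 0xc86bffc8" at the double fault */

        /* Prints 8192: an 8 KB (two-page) i386 kernel stack. */
        printf("kernel stack size: %lu bytes\n", stack_hi - stack_lo + 1);
        /* Prints 56: how far esp had dropped below the stack bottom. */
        printf("esp below stack bottom: %lu bytes\n", stack_lo - fault_esp);
        return (0);
}

In other words, by the time the double fault was taken, the stack pointer appears to have run 0x38 (56) bytes past the bottom of the 8 KB kernel stack.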