From owner-freebsd-hackers Thu Oct 11 11:37:38 2001
Message-ID: <3BC5E6DB.66D93BEB@infospace.com>
Date: Thu, 11 Oct 2001 11:37:15 -0700
From: Yevgeniy Aleynikov <eugene@infospace.com>
To: Matt Dillon
Cc: Kirk McKusick, Ian Dowse, peter@FreeBSD.ORG, ache@FreeBSD.ORG,
    Ken Pizzini, hackers@FreeBSD.ORG
Subject: Re: bleh. Re: ufs_rename panic
References: <200110030610.f936AbR11859@beastie.mckusick.com>
    <3BBE3F7A.98FBC714@infospace.com>
    <200110052320.f95NK6685878@earth.backplane.com>

Here's another -stable panic (it doesn't happen very often, but it has
shown up on different boxes too).

--
Yevgeniy Aleynikov              Infospace, Inc.
SysAdmin, USE                   Work: (206)357-4594

[Attached: server1.txt]

SMP 2 cpus
IdlePTD 3039232
initial pcb at 2666a0
panicstr: ffs_valloc: dup alloc
panic messages:
---
panic: ffs_valloc: dup alloc
mp_lock = 01000001; cpuid = 1; lapic.id = 00000000
boot() called on cpu#1

syncing disks... 2
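The panic means the inode allocator handed back an inode that is already
in use: ffs_valloc() (quoted below with the backtrace) finds a nonzero
i_mode on the vnode VFS_VGET() returns for the supposedly free inode, so
the cylinder-group inode map and the inode contents disagree. A toy,
self-contained model of that state (plain C arrays stand in for the real
cg bitmap macros; illustrative only, not FreeBSD code):

    #include <stdio.h>
    #include <string.h>

    #define NINO    16

    int
    main(void)
    {
            unsigned char inosused[NINO];   /* toy cg inode map: 1 = allocated */
            unsigned short imode[NINO];     /* toy inode table: 0 = free */
            int i;

            memset(inosused, 0, sizeof(inosused));
            memset(imode, 0, sizeof(imode));

            /* Simulated corruption: inode 5 is in use, but its map bit is clear. */
            imode[5] = 33188;

            /*
             * The allocator trusts the map; the i_mode check in
             * ffs_valloc() is what catches the disagreement and panics.
             */
            for (i = 0; i < NINO; i++)
                    if (inosused[i] == 0 && imode[i] != 0)
                            printf("dup alloc candidate: inum = %d, mode = 0%o\n",
                                i, imode[i]);
            return (0);
    }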
#0  dumpsys () at ../../kern/kern_shutdown.c:473
473             if (dumping++) {
(kgdb) bt
#0  dumpsys () at ../../kern/kern_shutdown.c:473
#1  0xc015e9df in boot (howto=256) at ../../kern/kern_shutdown.c:313
#2  0xc015ede0 in poweroff_wait (junk=0xc0232e01, howto=-1071436320)
    at ../../kern/kern_shutdown.c:581
#3  0xc01b6600 in ffs_valloc (pvp=0xdf902600, mode=33188, cred=0xc5b83380,
    vpp=0xdf541c9c) at ../../ufs/ffs/ffs_alloc.c:609
#4  0xc01c32ef in ufs_makeinode (mode=33188, dvp=0xdf902600, vpp=0xdf541edc,
    cnp=0xdf541ef0) at ../../ufs/ufs/ufs_vnops.c:2097
#5  0xc01c0978 in ufs_create (ap=0xdf541df8) at ../../ufs/ufs/ufs_vnops.c:194
#6  0xc01c363d in ufs_vnoperate (ap=0xdf541df8)
    at ../../ufs/ufs/ufs_vnops.c:2382
#7  0xc0192c88 in vn_open (ndp=0xdf541ec4, fmode=1538, cmode=420)
    at vnode_if.h:106
#8  0xc018ee6c in open (p=0xdf4c9a00, uap=0xdf541f80)
    at ../../kern/vfs_syscalls.c:1077
#9  0xc0205011 in syscall2 (frame={tf_fs = 47, tf_es = 47, tf_ds = 47,
      tf_edi = 1, tf_esi = 0, tf_ebp = -1077939600, tf_isp = -548134956,
      tf_ebx = -1077939604, tf_edx = 1537, tf_ecx = 137102023, tf_eax = 5,
      tf_trapno = 7, tf_err = 2, tf_eip = 674553728, tf_cs = 31,
      tf_eflags = 514, tf_esp = -1077939656, tf_ss = 47})
    at ../../i386/i386/trap.c:1155
#10 0xc01f291b in Xint0x80_syscall ()
-----------------------------------------------------------------------
The code at ../../ufs/ffs/ffs_alloc.c:609 (the failing check):

		fs->fs_contigdirs[cg]--;
	}
	ino = (ino_t)ffs_hashalloc(pip, cg, (long)ipref, mode,
					(allocfcn_t *)ffs_nodealloccg);
	if (ino == 0)
		goto noinodes;
	error = VFS_VGET(pvp->v_mount, ino, vpp);
	if (error) {
		UFS_VFREE(pvp, ino, mode);
		return (error);
	}
	ip = VTOI(*vpp);
	if (ip->i_mode) {
		printf("mode = 0%o, inum = %lu, fs = %s\n",
		    ip->i_mode, (u_long)ip->i_number, fs->fs_fsmnt);
		panic("ffs_valloc: dup alloc");
	}
------------------------------------------------------------------------
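On the numbers in frames #3 and #4: mode=33188 is the requested file mode
printed in decimal; in octal it is 0100644, i.e. a regular file with
rw-r--r-- permissions. A minimal snippet to check that (illustrative only,
not part of the dump):

    #include <stdio.h>
    #include <sys/stat.h>

    int
    main(void)
    {
            int mode = 33188;       /* value from frames #3 and #4 */

            /* Prints "mode = 0100644, regular file = 1": S_IFREG | 0644. */
            printf("mode = 0%o, regular file = %d\n",
                mode, (mode & S_IFMT) == S_IFREG);
            return (0);
    }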
#3  0xc01b6600 in ffs_valloc (pvp=0xdf902600, mode=33188, cred=0xc5b83380,
    vpp=0xdf541c9c) at ../../ufs/ffs/ffs_alloc.c:609
(kgdb) print *pvp
$6 = {v_flag = 0, v_usecount = 1, v_writecount = 0, v_holdcnt = 2,
  v_id = 69744400, v_mount = 0xc4174e00, v_op = 0xc4078700, v_freelist = {
    tqe_next = 0x0, tqe_prev = 0xe01f999c}, v_mntvnodes = {
    le_next = 0xdf949f00, le_prev = 0xe02ef924}, v_cleanblkhd = {
    tqh_first = 0xce8697ac, tqh_last = 0xce8697b4}, v_dirtyblkhd = {
    tqh_first = 0x0, tqh_last = 0xdf902634}, v_synclist = {
    le_next = 0xe034c7c0, le_prev = 0xe04904bc}, v_numoutput = 0,
  v_type = VDIR, v_un = {vu_mountedhere = 0x0, vu_socket = 0x0, vu_spec = {
      vu_specinfo = 0x0, vu_specnext = {sle_next = 0x0}}, vu_fifoinfo = 0x0},
  v_lease = 0x0, v_lastw = 0, v_cstart = 0, v_lasta = 0, v_clen = 0,
  v_object = 0x0, v_interlock = {lock_data = 0}, v_vnlock = 0xc448d400,
  v_tag = VT_UFS, v_data = 0xc448d400, v_cache_src = {lh_first = 0xc5630f40},
  v_cache_dst = {tqh_first = 0xc5dcfb80, tqh_last = 0xc5dcfb90},
  v_dd = 0xdfac9b00, v_ddid = 69744310, v_pollinfo = {vpi_lock = {
      lock_data = 0}, vpi_selinfo = {si_pid = 0, si_note = {slh_first = 0x0},
      si_flags = 0}, vpi_events = 0, vpi_revents = 0}, v_vxproc = 0x0}
(kgdb) print **vpp
$2 = {v_flag = 0, v_usecount = 1, v_writecount = 0, v_holdcnt = 0,
  v_id = 69818641, v_mount = 0xc4174e00, v_op = 0xc4078700, v_freelist = {
    tqe_next = 0xdf941dc0, tqe_prev = 0xc02670d0}, v_mntvnodes = {
    le_next = 0xdfd79a80, le_prev = 0xc4174e18}, v_cleanblkhd = {
    tqh_first = 0x0, tqh_last = 0xdfa89e2c}, v_dirtyblkhd = {tqh_first = 0x0,
    tqh_last = 0xdfa89e34}, v_synclist = {le_next = 0x0,
    le_prev = 0xc407b0cc}, v_numoutput = 0, v_type = VDIR, v_un = {
    vu_mountedhere = 0x0, vu_socket = 0x0, vu_spec = {vu_specinfo = 0x0,
      vu_specnext = {sle_next = 0x0}}, vu_fifoinfo = 0x0}, v_lease = 0x0,
  v_lastw = 0, v_cstart = 0, v_lasta = 0, v_clen = 0, v_object = 0x0,
  v_interlock = {lock_data = 0}, v_vnlock = 0xc55f1200, v_tag = VT_UFS,
  v_data = 0xc55f1200, v_cache_src = {lh_first = 0x0}, v_cache_dst = {
    tqh_first = 0x0, tqh_last = 0xdfa89e80}, v_dd = 0xdfa89e00, v_ddid = 0,
  v_pollinfo = {vpi_lock = {lock_data = 0}, vpi_selinfo = {si_pid = 0,
      si_note = {slh_first = 0x0}, si_flags = 0}, vpi_events = 0,
    vpi_revents = 0}, v_vxproc = 0x0}
(kgdb) print mode
$3 = 33188
(kgdb) print cred
$4 = (struct ucred *) 0x0
----------------------------------------------------------------
#4  0xc01c32ef in ufs_makeinode (mode=33188, dvp=0xdf902600, vpp=0xdf541edc,
    cnp=0xdf541ef0) at ../../ufs/ufs/ufs_vnops.c:2097
(kgdb) print *cnp
$7 = {cn_nameiop = 1, cn_flags = 52300, cn_proc = 0xdf4c9a00,
  cn_cred = 0xc5b83380,
  cn_pnbuf = 0xdf6c9c00 "/data1/hypermart.net/mv/en/img.titles/_vti_cnf/.en_servicos.gif.tmp",
  cn_nameptr = 0xdf6c9c2f ".en_servicos.gif.tmp", cn_namelen = 20,
  cn_consume = 0}
(kgdb) print *dvp
$8 = { ...same vnode and identical contents as *pvp in frame #3 above
      (dvp == pvp == 0xdf902600)... }
(kgdb) print *vpp
$9 = (struct vnode *) 0x0
--------------------------------------------------
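A consistency check on the componentname in frame #4: cn_nameptr
(0xdf6c9c2f) sits at cn_pnbuf + 47, right past the last '/', and
cn_namelen = 20 is the length of the final component. A small standalone
snippet (again illustrative, not from the dump) reproducing both numbers:

    #include <stdio.h>
    #include <string.h>

    int
    main(void)
    {
            const char *pnbuf = "/data1/hypermart.net/mv/en/img.titles/"
                "_vti_cnf/.en_servicos.gif.tmp";
            const char *nameptr = strrchr(pnbuf, '/') + 1;

            /* Prints "offset = 47 (0x2f), namelen = 20", matching $7. */
            printf("offset = %ld (0x%lx), namelen = %zu\n",
                (long)(nameptr - pnbuf), (long)(nameptr - pnbuf),
                strlen(nameptr));
            return (0);
    }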
#5  0xc01c0978 in ufs_create (ap=0xdf541df8) at ../../ufs/ufs/ufs_vnops.c:194
(kgdb) print *ap
$11 = {a_desc = 0xc0246b40, a_dvp = 0xdf902600, a_vpp = 0xdf541edc,
  a_cnp = 0xdf541ef0, a_vap = 0xdf541e0c}
(kgdb) print *ap->a_desc
$13 = {vdesc_offset = 11, vdesc_name = 0xc0218151 "vop_create",
  vdesc_flags = 0, vdesc_vp_offsets = 0xc0246b24, vdesc_vpp_offset = 8,
  vdesc_cred_offset = -1, vdesc_proc_offset = -1,
  vdesc_componentname_offset = 12, vdesc_transports = 0x0}
(kgdb) print *ap->a_vpp
$21 = (struct vnode *) 0x0
(kgdb) print *ap->a_dvp
$22 = { ...same vnode and identical contents as *pvp/*dvp above
      (0xdf902600)... }
------------------------------------------------------
#6  0xc01c363d in ufs_vnoperate (ap=0xdf541df8)
    at ../../ufs/ufs/ufs_vnops.c:2382
(kgdb) print ap
$26 = (struct vop_generic_args *) 0x0
------------------------------------------------------
#7  0xc0192c88 in vn_open (ndp=0xdf541ec4, fmode=1538, cmode=420)
    at vnode_if.h:106
(kgdb) print *ndp
$27 = {ni_dirp = 0x848d28c
cannot read proc at 0
------------------------------------------------------
#8  0xc018ee6c in open (p=0xdf4c9a00, uap=0xdf541f80)
    at ../../kern/vfs_syscalls.c:1077
(kgdb) print *p
$31 = {p_procq = {tqe_next = 0x0, tqe_prev = 0xc0277b40}, p_list = {
    le_next = 0xdf5788a0, le_prev = 0xc0277ad8}, p_cred = 0xc43c2500,
  p_fd = 0xc4605400, p_stats = 0xdf540cd0, p_limit = 0xc4879a00,
  p_upages_obj = 0xdf5338a0, p_procsig = 0xc5447ac0, p_flag = 16389,
  p_stat = 2 '\002', p_pad1 = "\000\000", p_pid = 41233, p_hash = {
    le_next = 0x0, le_prev = 0xc405d444}, p_pglist = {le_next = 0xdf576340,
    le_prev = 0xe051b0bc}, p_pptr = 0xe051b080, p_sibling = {le_next = 0x0,
    le_prev = 0xe051b0d0}, p_children = {lh_first = 0x0}, p_ithandle = {
    callout = 0xce7ec0a8}, p_oppid = 0, p_dupfd = -4, p_vmspace = 0xe053d280,
  p_estcpu = 93, p_cpticks = 85, p_pctcpu = 45, p_wchan = 0x0,
  p_wmesg = 0xc0229f89 "biord", p_swtime = 1, p_slptime = 0, p_realtimer = {
    it_interval = {tv_sec = 0, tv_usec = 0}, it_value = {tv_sec = 1796341,
      tv_usec = 491839}}, p_runtime = 90127, p_uu = 0, p_su = 0, p_iu = 0,
  p_uticks = 9, p_sticks = 134, p_iticks = 0, p_traceflag = 0,
  p_tracep = 0x0, p_siglist = {__bits = {0, 0, 0, 0}}, p_textvp = 0xe0094fc0,
  p_lock = 0 '\000', p_oncpu = 1 '\001', p_lastcpu = 0 '\000',
  p_rqindex = 4 '\004', p_locks = -158, p_simple_locks = 0, p_stops = 0,
  p_stype = 0, p_step = 0 '\000', p_pfsflags = 0 '\000', p_pad3 = "\000",
  p_retval = {0, 1537}, p_sigiolst = {slh_first = 0x0}, p_sigparent = 20,
  p_oldsigmask = {__bits = {0, 0, 0, 0}}, p_sig = 0, p_code = 0, p_klist = {
    slh_first = 0x0}, p_sigmask = {__bits = {0, 0, 0, 0}}, p_sigstk = {
    ss_sp = 0x0, ss_size = 0, ss_flags = 4}, p_priority = 16 '\020',
  p_usrpri = 61 '=', p_nice = 0 '\000',
  p_comm = "author.exe\000\000\000\000\000\000", p_pgrp = 0xc41a3e40,
  p_sysent = 0xc024c3c0, p_rtprio = {type = 1, prio = 0}, p_prison = 0x0,
  p_args = 0xc5fff080, p_addr = 0xdf540000, p_md = {md_regs = 0xdf541fa8},
  p_xstat = 0, p_acflag = 2, p_ru = 0x0, p_nthreads = 0, p_aioinfo = 0x0,
  p_wakeup = 0, p_peers = 0x0, p_leader = 0xdf4c9a00, p_asleep = {
    as_priority = 0, as_timo = 0}, p_emuldata = 0x0}
(kgdb) print *uap
$32 = {path = 0x848d28c
cannot read proc at 0
-----------------------------------------------------------------

To Unsubscribe: send mail to majordomo@FreeBSD.org
with "unsubscribe freebsd-hackers" in the body of the message