From owner-svn-src-all@freebsd.org  Thu Dec  5 00:03:18 2019
Return-Path: <owner-svn-src-all@freebsd.org>
Delivered-To: svn-src-all@mailman.nyi.freebsd.org
Received: from mx1.freebsd.org (mx1.freebsd.org [IPv6:2610:1c1:1:606c::19:1])
 by mailman.nyi.freebsd.org (Postfix) with ESMTP id 8C66E1B26BF;
 Thu, 5 Dec 2019 00:03:18 +0000 (UTC) (envelope-from kib@FreeBSD.org)
Received: from mxrelay.nyi.freebsd.org (mxrelay.nyi.freebsd.org [IPv6:2610:1c1:1:606c::19:3])
 (using TLSv1.3 with cipher TLS_AES_256_GCM_SHA384 (256/256 bits)
 server-signature RSA-PSS (4096 bits) client-signature RSA-PSS (4096 bits)
 client-digest SHA256)
 (Client CN "mxrelay.nyi.freebsd.org",
 Issuer "Let's Encrypt Authority X3" (verified OK))
 by mx1.freebsd.org (Postfix) with ESMTPS id 47Swsf2zcnz4Vjn;
 Thu, 5 Dec 2019 00:03:18 +0000 (UTC) (envelope-from kib@FreeBSD.org)
Received: from repo.freebsd.org (repo.freebsd.org [IPv6:2610:1c1:1:6068::e6a:0])
 (using TLSv1.2 with cipher ECDHE-RSA-AES256-GCM-SHA384 (256/256 bits))
 (Client did not present a certificate)
 by mxrelay.nyi.freebsd.org (Postfix) with ESMTPS id 497D284FF;
 Thu, 5 Dec 2019 00:03:18 +0000 (UTC) (envelope-from kib@FreeBSD.org)
Received: from repo.freebsd.org ([127.0.1.37])
 by repo.freebsd.org (8.15.2/8.15.2) with ESMTP id xB503ID4008637;
 Thu, 5 Dec 2019 00:03:18 GMT (envelope-from kib@FreeBSD.org)
Received: (from kib@localhost)
 by repo.freebsd.org (8.15.2/8.15.2/Submit) id xB503HtP008634;
 Thu, 5 Dec 2019 00:03:17 GMT (envelope-from kib@FreeBSD.org)
Message-Id: <201912050003.xB503HtP008634@repo.freebsd.org>
X-Authentication-Warning: repo.freebsd.org: kib set sender to kib@FreeBSD.org using -f
From: Konstantin Belousov <kib@FreeBSD.org>
Date: Thu, 5 Dec 2019 00:03:17 +0000 (UTC)
To: src-committers@freebsd.org, svn-src-all@freebsd.org, svn-src-head@freebsd.org
Subject: svn commit: r355407 - head/sys/fs/tmpfs
X-SVN-Group: head
X-SVN-Commit-Author: kib
X-SVN-Commit-Paths: head/sys/fs/tmpfs
X-SVN-Commit-Revision: 355407
X-SVN-Commit-Repository: base
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit
X-BeenThere: svn-src-all@freebsd.org
X-Mailman-Version: 2.1.29
Precedence: list
List-Id: "SVN commit messages for the entire src tree \(except for "user" and "projects"\)"
X-List-Received-Date: Thu, 05 Dec 2019 00:03:18 -0000

Author: kib
Date: Thu Dec  5 00:03:17 2019
New Revision: 355407
URL: https://svnweb.freebsd.org/changeset/base/355407

Log:
  Stop using per-mount tmpfs zones.

  Requested and reviewed by:  jeff
  Sponsored by:  The FreeBSD Foundation
  MFC after:  1 week
  Differential revision:  https://reviews.freebsd.org/D22643

Modified:
  head/sys/fs/tmpfs/tmpfs.h
  head/sys/fs/tmpfs/tmpfs_subr.c
  head/sys/fs/tmpfs/tmpfs_vfsops.c

Modified: head/sys/fs/tmpfs/tmpfs.h
==============================================================================
--- head/sys/fs/tmpfs/tmpfs.h   Wed Dec  4 23:24:40 2019   (r355406)
+++ head/sys/fs/tmpfs/tmpfs.h   Thu Dec  5 00:03:17 2019   (r355407)
@@ -378,10 +378,6 @@ struct tmpfs_mount {
         /* All node lock to protect the node list and tmp_pages_used. */
         struct mtx              tm_allnode_lock;
 
-        /* Zones used to store file system meta data, per tmpfs mount. */
-        uma_zone_t              tm_dirent_pool;
-        uma_zone_t              tm_node_pool;
-
         /* Read-only status. */
         bool                    tm_ronly;
         /* Do not use namecache. */
@@ -493,8 +489,9 @@ struct tmpfs_dirent *tmpfs_dir_next(struct tmpfs_node
 #endif
 
 size_t tmpfs_mem_avail(void);
-
 size_t tmpfs_pages_used(struct tmpfs_mount *tmp);
+void tmpfs_subr_init(void);
+void tmpfs_subr_uninit(void);
 
 #endif

Modified: head/sys/fs/tmpfs/tmpfs_subr.c
==============================================================================
--- head/sys/fs/tmpfs/tmpfs_subr.c   Wed Dec  4 23:24:40 2019   (r355406)
+++ head/sys/fs/tmpfs/tmpfs_subr.c   Thu Dec  5 00:03:17 2019   (r355407)
@@ -72,7 +72,74 @@ SYSCTL_NODE(_vfs, OID_AUTO, tmpfs, CTLFLAG_RW, 0, "tmp
 
 static long tmpfs_pages_reserved = TMPFS_PAGES_MINRESERVED;
 
+static uma_zone_t tmpfs_dirent_pool;
+static uma_zone_t tmpfs_node_pool;
+
 static int
+tmpfs_node_ctor(void *mem, int size, void *arg, int flags)
+{
+        struct tmpfs_node *node;
+
+        node = mem;
+        node->tn_gen++;
+        node->tn_size = 0;
+        node->tn_status = 0;
+        node->tn_flags = 0;
+        node->tn_links = 0;
+        node->tn_vnode = NULL;
+        node->tn_vpstate = 0;
+        return (0);
+}
+
+static void
+tmpfs_node_dtor(void *mem, int size, void *arg)
+{
+        struct tmpfs_node *node;
+
+        node = mem;
+        node->tn_type = VNON;
+}
+
+static int
+tmpfs_node_init(void *mem, int size, int flags)
+{
+        struct tmpfs_node *node;
+
+        node = mem;
+        node->tn_id = 0;
+        mtx_init(&node->tn_interlock, "tmpfsni", NULL, MTX_DEF);
+        node->tn_gen = arc4random();
+        return (0);
+}
+
+static void
+tmpfs_node_fini(void *mem, int size)
+{
+        struct tmpfs_node *node;
+
+        node = mem;
+        mtx_destroy(&node->tn_interlock);
+}
+
+void
+tmpfs_subr_init(void)
+{
+        tmpfs_dirent_pool = uma_zcreate("TMPFS dirent",
+            sizeof(struct tmpfs_dirent), NULL, NULL, NULL, NULL,
+            UMA_ALIGN_PTR, 0);
+        tmpfs_node_pool = uma_zcreate("TMPFS node",
+            sizeof(struct tmpfs_node), tmpfs_node_ctor, tmpfs_node_dtor,
+            tmpfs_node_init, tmpfs_node_fini, UMA_ALIGN_PTR, 0);
+}
+
+void
+tmpfs_subr_uninit(void)
+{
+        uma_zdestroy(tmpfs_node_pool);
+        uma_zdestroy(tmpfs_dirent_pool);
+}
+
+static int
 sysctl_mem_reserved(SYSCTL_HANDLER_ARGS)
 {
         int error;
@@ -219,8 +286,7 @@ tmpfs_alloc_node(struct mount *mp, struct tmpfs_mount
         if ((mp->mnt_kern_flag & MNT_RDONLY) != 0)
                 return (EROFS);
 
-        nnode = (struct tmpfs_node *)uma_zalloc_arg(tmp->tm_node_pool, tmp,
-            M_WAITOK);
+        nnode = uma_zalloc_arg(tmpfs_node_pool, tmp, M_WAITOK);
 
         /* Generic initialization. */
         nnode->tn_type = type;
@@ -367,7 +433,7 @@ tmpfs_free_node_locked(struct tmpfs_mount *tmp, struct
                 panic("tmpfs_free_node: type %p %d", node, (int)node->tn_type);
         }
 
-        uma_zfree(tmp->tm_node_pool, node);
+        uma_zfree(tmpfs_node_pool, node);
         TMPFS_LOCK(tmp);
         tmpfs_free_tmp(tmp);
         return (true);
@@ -434,7 +500,7 @@ tmpfs_alloc_dirent(struct tmpfs_mount *tmp, struct tmp
 {
         struct tmpfs_dirent *nde;
 
-        nde = uma_zalloc(tmp->tm_dirent_pool, M_WAITOK);
+        nde = uma_zalloc(tmpfs_dirent_pool, M_WAITOK);
         nde->td_node = node;
         if (name != NULL) {
                 nde->ud.td_name = malloc(len, M_TMPFSNAME, M_WAITOK);
@@ -470,7 +536,7 @@ tmpfs_free_dirent(struct tmpfs_mount *tmp, struct tmpf
         }
         if (!tmpfs_dirent_duphead(de) && de->ud.td_name != NULL)
                 free(de->ud.td_name, M_TMPFSNAME);
-        uma_zfree(tmp->tm_dirent_pool, de);
+        uma_zfree(tmpfs_dirent_pool, de);
 }
 
 void

Modified: head/sys/fs/tmpfs/tmpfs_vfsops.c
==============================================================================
--- head/sys/fs/tmpfs/tmpfs_vfsops.c   Wed Dec  4 23:24:40 2019   (r355406)
+++ head/sys/fs/tmpfs/tmpfs_vfsops.c   Thu Dec  5 00:03:17 2019   (r355407)
@@ -99,49 +99,6 @@ static const char *tmpfs_updateopts[] = {
         "from", "export", "size", NULL
 };
 
-static int
-tmpfs_node_ctor(void *mem, int size, void *arg, int flags)
-{
-        struct tmpfs_node *node = (struct tmpfs_node *)mem;
-
-        node->tn_gen++;
-        node->tn_size = 0;
-        node->tn_status = 0;
-        node->tn_flags = 0;
-        node->tn_links = 0;
-        node->tn_vnode = NULL;
-        node->tn_vpstate = 0;
-
-        return (0);
-}
-
-static void
-tmpfs_node_dtor(void *mem, int size, void *arg)
-{
-        struct tmpfs_node *node = (struct tmpfs_node *)mem;
-        node->tn_type = VNON;
-}
-
-static int
-tmpfs_node_init(void *mem, int size, int flags)
-{
-        struct tmpfs_node *node = (struct tmpfs_node *)mem;
-        node->tn_id = 0;
-
-        mtx_init(&node->tn_interlock, "tmpfs node interlock", NULL, MTX_DEF);
-        node->tn_gen = arc4random();
-
-        return (0);
-}
-
-static void
-tmpfs_node_fini(void *mem, int size)
-{
-        struct tmpfs_node *node = (struct tmpfs_node *)mem;
-
-        mtx_destroy(&node->tn_interlock);
-}
-
 /*
  * Handle updates of time from writes to mmaped regions. Use
  * MNT_VNODE_FOREACH_ALL instead of MNT_VNODE_FOREACH_ACTIVE, since
@@ -481,12 +438,6 @@ tmpfs_mount(struct mount *mp)
         tmp->tm_pages_max = pages;
         tmp->tm_pages_used = 0;
         new_unrhdr64(&tmp->tm_ino_unr, 2);
-        tmp->tm_dirent_pool = uma_zcreate("TMPFS dirent",
-            sizeof(struct tmpfs_dirent), NULL, NULL, NULL, NULL,
-            UMA_ALIGN_PTR, 0);
-        tmp->tm_node_pool = uma_zcreate("TMPFS node",
-            sizeof(struct tmpfs_node), tmpfs_node_ctor, tmpfs_node_dtor,
-            tmpfs_node_init, tmpfs_node_fini, UMA_ALIGN_PTR, 0);
         tmp->tm_ronly = (mp->mnt_flag & MNT_RDONLY) != 0;
         tmp->tm_nonc = nonc;
 
@@ -495,8 +446,6 @@ tmpfs_mount(struct mount *mp)
             root_mode & ALLPERMS, NULL, NULL, VNOVAL, &root);
         if (error != 0 || root == NULL) {
-                uma_zdestroy(tmp->tm_node_pool);
-                uma_zdestroy(tmp->tm_dirent_pool);
                 free(tmp, M_TMPFSMNT);
                 return (error);
         }
@@ -590,9 +539,6 @@ tmpfs_free_tmp(struct tmpfs_mount *tmp)
         }
         TMPFS_UNLOCK(tmp);
 
-        uma_zdestroy(tmp->tm_dirent_pool);
-        uma_zdestroy(tmp->tm_node_pool);
-
         mtx_destroy(&tmp->tm_allnode_lock);
         MPASS(tmp->tm_pages_used == 0);
         MPASS(tmp->tm_nodes_inuse == 0);
@@ -702,10 +648,23 @@ tmpfs_susp_clean(struct mount *mp __unused)
 {
 }
 
+static int
+tmpfs_init(struct vfsconf *conf)
+{
+        tmpfs_subr_init();
+        return (0);
+}
+
+static int
+tmpfs_uninit(struct vfsconf *conf)
+{
+        tmpfs_subr_uninit();
+        return (0);
+}
+
 /*
  * tmpfs vfs operations.
  */
-
 struct vfsops tmpfs_vfsops = {
         .vfs_mount =            tmpfs_mount,
         .vfs_unmount =          tmpfs_unmount,
@@ -715,5 +674,7 @@ struct vfsops tmpfs_vfsops = {
         .vfs_fhtovp =           tmpfs_fhtovp,
         .vfs_sync =             tmpfs_sync,
         .vfs_susp_clean =       tmpfs_susp_clean,
+        .vfs_init =             tmpfs_init,
+        .vfs_uninit =           tmpfs_uninit,
 };
 VFS_SET(tmpfs_vfsops, tmpfs, VFCF_JAIL);
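
A note on the UMA semantics the change above relies on: the ctor/dtor pair runs on every uma_zalloc()/uma_zfree(), while the init/fini pair runs only when an item's backing memory is brought into or released from the zone. That is why the mutex setup stays in tmpfs_node_init()/tmpfs_node_fini() and the cheap per-allocation resets (including the tn_gen bump) stay in tmpfs_node_ctor(). The sketch below is a minimal userland analogy of that split, not FreeBSD kernel code: a fixed-size free list stands in for the zone's item cache, and the toy_* names are invented for illustration (build with cc -pthread).

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct toy_node {
        pthread_mutex_t lock;   /* set up once per backing item (init/fini) */
        unsigned gen;           /* survives free; bumped on every reuse (ctor) */
        int in_use;
};

#define CACHE_SIZE 4

static struct toy_node *cache[CACHE_SIZE];      /* freed items, still "initialized" */
static int cached;

/* init/fini analog: expensive one-time setup tied to the memory itself. */
static void
toy_node_init(struct toy_node *n)
{
        pthread_mutex_init(&n->lock, NULL);
        n->gen = (unsigned)rand();
}

static void
toy_node_fini(struct toy_node *n)
{
        pthread_mutex_destroy(&n->lock);
}

/* ctor/dtor analog: cheap per-allocation reset. */
static void
toy_node_ctor(struct toy_node *n)
{
        n->gen++;               /* fresh identity for the reused slot */
        n->in_use = 1;
}

static void
toy_node_dtor(struct toy_node *n)
{
        n->in_use = 0;
}

static struct toy_node *
toy_cache_alloc(void)
{
        struct toy_node *n;

        if (cached > 0) {
                n = cache[--cached];    /* reuse: init is skipped */
        } else {
                n = calloc(1, sizeof(*n));
                if (n == NULL)
                        abort();
                toy_node_init(n);       /* fresh memory: run init */
        }
        toy_node_ctor(n);
        return (n);
}

static void
toy_cache_free(struct toy_node *n)
{
        toy_node_dtor(n);
        if (cached < CACHE_SIZE) {
                cache[cached++] = n;    /* mutex and gen stay intact */
        } else {
                toy_node_fini(n);       /* leaving the cache: run fini */
                free(n);
        }
}

int
main(void)
{
        struct toy_node *n;
        unsigned first_gen;

        n = toy_cache_alloc();
        first_gen = n->gen;
        toy_cache_free(n);
        n = toy_cache_alloc();          /* same backing item, gen advanced */
        printf("gen advanced by %u\n", n->gen - first_gen);
        toy_cache_free(n);
        return (0);
}

In the real zones this is why tn_gen is seeded with arc4random() in tmpfs_node_init() but only incremented in tmpfs_node_ctor(): a node cycling through the zone keeps its mutex and gets a new generation number without being re-seeded. Making the zones global simply shares that cached per-item state across all tmpfs mounts instead of tearing it down on every unmount.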