Date: Mon, 17 Feb 2014 15:51:19 +0000 (UTC)
From: Andriy Gapon <avg@FreeBSD.org>
To: src-committers@freebsd.org, svn-src-all@freebsd.org, svn-src-stable@freebsd.org, svn-src-stable-8@freebsd.org
Subject: svn commit: r262074 - stable/8/sys/cddl/contrib/opensolaris/uts/common/fs/zfs
Message-ID: <201402171551.s1HFpKmX021444@svn.freebsd.org>
Author: avg
Date: Mon Feb 17 15:51:19 2014
New Revision: 262074
URL: http://svnweb.freebsd.org/changeset/base/262074

Log:
  MFC r255226: Add sysctl/tunables for various metaslab variables

  MFC slacker: pjd

Modified:
  stable/8/sys/cddl/contrib/opensolaris/uts/common/fs/zfs/metaslab.c

Directory Properties:
  stable/8/sys/   (props changed)
  stable/8/sys/cddl/   (props changed)
  stable/8/sys/cddl/contrib/opensolaris/   (props changed)

Modified: stable/8/sys/cddl/contrib/opensolaris/uts/common/fs/zfs/metaslab.c
==============================================================================
--- stable/8/sys/cddl/contrib/opensolaris/uts/common/fs/zfs/metaslab.c	Mon Feb 17 15:50:40 2014	(r262073)
+++ stable/8/sys/cddl/contrib/opensolaris/uts/common/fs/zfs/metaslab.c	Mon Feb 17 15:51:19 2014	(r262074)
@@ -32,6 +32,9 @@ #include <sys/vdev_impl.h>
 #include <sys/zio.h>
 
+SYSCTL_DECL(_vfs_zfs);
+SYSCTL_NODE(_vfs_zfs, OID_AUTO, metaslab, CTLFLAG_RW, 0, "ZFS metaslab");
+
 /*
  * Allow allocations to switch to gang blocks quickly. We do this to
  * avoid having to load lots of space_maps in a given txg. There are,
@@ -46,6 +49,10 @@ uint64_t metaslab_aliquot = 512ULL << 10;
 uint64_t metaslab_gang_bang = SPA_MAXBLOCKSIZE + 1;	/* force gang blocks */
+TUNABLE_QUAD("vfs.zfs.metaslab.gang_bang", &metaslab_gang_bang);
+SYSCTL_QUAD(_vfs_zfs_metaslab, OID_AUTO, gang_bang, CTLFLAG_RWTUN,
+    &metaslab_gang_bang, 0,
+    "Force gang block allocation for blocks larger than or equal to this value");
 
 /*
  * The in-core space map representation is more compact than its on-disk form.
@@ -62,12 +69,10 @@ int zfs_condense_pct = 200;
  * in zio_init() unless it has been overridden in /etc/system.
  */
 int zfs_mg_alloc_failures = 0;
-
-SYSCTL_DECL(_vfs_zfs);
-SYSCTL_INT(_vfs_zfs, OID_AUTO, mg_alloc_failures, CTLFLAG_RDTUN,
+TUNABLE_INT("vfs.zfs.mg_alloc_failures", &zfs_mg_alloc_failures);
+SYSCTL_INT(_vfs_zfs, OID_AUTO, mg_alloc_failures, CTLFLAG_RWTUN,
     &zfs_mg_alloc_failures, 0,
     "Number of allowed allocation failures per vdev");
-TUNABLE_INT("vfs.zfs.mg_alloc_failures", &zfs_mg_alloc_failures);
 
 /*
  * The zfs_mg_noalloc_threshold defines which metaslab groups should
@@ -88,6 +93,10 @@ int zfs_mg_noalloc_threshold = 0;
  * Metaslab debugging: when set, keeps all space maps in core to verify frees.
  */
 static int metaslab_debug = 0;
+TUNABLE_INT("vfs.zfs.metaslab.debug", &metaslab_debug);
+SYSCTL_INT(_vfs_zfs_metaslab, OID_AUTO, debug, CTLFLAG_RWTUN, &metaslab_debug,
+    0,
+    "Metaslab debugging: when set, keeps all space maps in core to verify frees");
 
 /*
  * Minimum size which forces the dynamic allocator to change
@@ -96,6 +105,11 @@ static int metaslab_debug = 0;
  * aggressive strategy (i.e search by size rather than offset).
  */
 uint64_t metaslab_df_alloc_threshold = SPA_MAXBLOCKSIZE;
+TUNABLE_QUAD("vfs.zfs.metaslab.df_alloc_threshold",
+    &metaslab_df_alloc_threshold);
+SYSCTL_QUAD(_vfs_zfs_metaslab, OID_AUTO, df_alloc_threshold, CTLFLAG_RWTUN,
+    &metaslab_df_alloc_threshold, 0,
+    "Minimum size which forces the dynamic allocator to change its allocation strategy");
 
 /*
  * The minimum free space, in percent, which must be available
@@ -104,22 +118,37 @@ uint64_t metaslab_df_alloc_threshold = S
  * switch to using best-fit allocations.
  */
 int metaslab_df_free_pct = 4;
+TUNABLE_INT("vfs.zfs.metaslab.df_free_pct", &metaslab_df_free_pct);
+SYSCTL_INT(_vfs_zfs_metaslab, OID_AUTO, df_free_pct, CTLFLAG_RWTUN,
+    &metaslab_df_free_pct, 0,
+    "The minimum free space, in percent, which must be available in a space map to continue allocations in a first-fit fashion");
 
 /*
  * A metaslab is considered "free" if it contains a contiguous
 * segment which is greater than metaslab_min_alloc_size.
 */
 uint64_t metaslab_min_alloc_size = DMU_MAX_ACCESS;
+TUNABLE_QUAD("vfs.zfs.metaslab.min_alloc_size",
+    &metaslab_min_alloc_size);
+SYSCTL_QUAD(_vfs_zfs_metaslab, OID_AUTO, min_alloc_size, CTLFLAG_RWTUN,
+    &metaslab_min_alloc_size, 0,
+    "A metaslab is considered \"free\" if it contains a contiguous segment which is greater than vfs.zfs.metaslab.min_alloc_size");
 
 /*
  * Max number of space_maps to prefetch.
 */
 int metaslab_prefetch_limit = SPA_DVAS_PER_BP;
+TUNABLE_INT("vfs.zfs.metaslab.prefetch_limit", &metaslab_prefetch_limit);
+SYSCTL_INT(_vfs_zfs_metaslab, OID_AUTO, prefetch_limit, CTLFLAG_RWTUN,
+    &metaslab_prefetch_limit, 0, "Maximum number of space_maps to prefetch");
 
 /*
  * Percentage bonus multiplier for metaslabs that are in the bonus area.
 */
 int metaslab_smo_bonus_pct = 150;
+TUNABLE_INT("vfs.zfs.metaslab.smo_bonus_pct", &metaslab_smo_bonus_pct);
+SYSCTL_INT(_vfs_zfs_metaslab, OID_AUTO, smo_bonus_pct, CTLFLAG_RWTUN,
+    &metaslab_smo_bonus_pct, 0, "Percentage bonus multiplier for metaslabs that are in the bonus area");
 
 /*
  * Should we be willing to write data to degraded vdevs?
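
For readers unfamiliar with the pattern repeated throughout this diff: each
knob pairs a TUNABLE_*() macro, which seeds the variable from the kernel
environment (e.g. /boot/loader.conf) early in boot, with a SYSCTL_*() macro,
which exposes the same variable for run-time inspection and modification
under the vfs.zfs tree. The _QUAD variants do the same for 64-bit values,
and CTLFLAG_RWTUN marks the OID as both writable and backed by a loader
tunable. A minimal sketch of the pattern, using a hypothetical
vfs.zfs.example_knob that is not part of this commit:

    /* Hypothetical example -- not part of r262074. */
    #include <sys/param.h>
    #include <sys/kernel.h>
    #include <sys/sysctl.h>

    static int example_knob = 42;	/* default if never overridden */

    SYSCTL_DECL(_vfs_zfs);		/* parent node declared elsewhere */

    /* Fetch "vfs.zfs.example_knob" from the loader environment at boot. */
    TUNABLE_INT("vfs.zfs.example_knob", &example_knob);

    /* Expose the same variable read-write at run time; CTLFLAG_RWTUN
     * additionally advertises it as settable from loader.conf. */
    SYSCTL_INT(_vfs_zfs, OID_AUTO, example_knob, CTLFLAG_RWTUN,
        &example_knob, 0, "Hypothetical example knob");

With that in place, the default could be overridden at boot by putting
vfs.zfs.example_knob=64 in /boot/loader.conf, or changed on a live system
with "sysctl vfs.zfs.example_knob=64"; the knobs added by this commit work
the same way.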