Date:      Thu, 28 Nov 2019 07:49:26 +0000 (UTC)
From:      Jeff Roberson <jeff@FreeBSD.org>
To:        src-committers@freebsd.org, svn-src-all@freebsd.org, svn-src-head@freebsd.org
Subject:   svn commit: r355169 - in head/sys: kern vm
Message-ID:  <201911280749.xAS7nQvU013856@repo.freebsd.org>

Author: jeff
Date: Thu Nov 28 07:49:25 2019
New Revision: 355169
URL: https://svnweb.freebsd.org/changeset/base/355169

Log:
  Garbage collect the mostly unused us_keg field.  Use appropriately named
  union members in vm_page.h to store the zone and slab.  Remove some nearby
  dead code.
  
  Reviewed by:	markj
  Differential Revision:	https://reviews.freebsd.org/D22564

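For context, the core of the change is that the per-page private pointer (plinks.s.pv), which previously held only the slab, is replaced by a two-member union holding both the slab and its zone, so free() can reach the zone directly instead of chasing the slab's us_keg back-pointer. Below is a minimal standalone sketch of that lookup pattern; the types and mock_* helpers are userland stand-ins for illustration only, not the kernel structures (the real accessors are in the uma_int.h hunk further down):

	#include <stdio.h>

	/* Mock stand-ins for uma_zone_t / uma_slab_t. */
	struct mock_zone { size_t uz_size; };
	struct mock_slab { int us_freecount; };

	/* Per-page bookkeeping, mirroring the new plinks.uma union members. */
	struct mock_page {
		union {
			struct {
				void *slab;
				void *zone;
			} uma;
		} plinks;
	};

	/* Analogue of vsetzoneslab(): record both zone and slab on the page. */
	static void
	mock_vsetzoneslab(struct mock_page *p, struct mock_zone *zone,
	    struct mock_slab *slab)
	{
		p->plinks.uma.slab = slab;
		p->plinks.uma.zone = zone;
	}

	/* Analogue of vtozoneslab(): recover both in a single lookup. */
	static void
	mock_vtozoneslab(struct mock_page *p, struct mock_zone **zone,
	    struct mock_slab **slab)
	{
		*slab = p->plinks.uma.slab;
		*zone = p->plinks.uma.zone;
	}

	int
	main(void)
	{
		struct mock_zone z = { .uz_size = 128 };
		struct mock_slab s = { .us_freecount = 4 };
		struct mock_page pg;
		struct mock_zone *zp;
		struct mock_slab *sp;

		mock_vsetzoneslab(&pg, &z, &s);
		mock_vtozoneslab(&pg, &zp, &sp);
		/* A free path can now use zp->uz_size directly; no us_keg needed. */
		printf("item size %zu, free count %d\n", zp->uz_size, sp->us_freecount);
		return (0);
	}
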
Modified:
  head/sys/kern/kern_malloc.c
  head/sys/vm/uma_core.c
  head/sys/vm/uma_int.h
  head/sys/vm/vm_page.h

Modified: head/sys/kern/kern_malloc.c
==============================================================================
--- head/sys/kern/kern_malloc.c	Thu Nov 28 05:40:15 2019	(r355168)
+++ head/sys/kern/kern_malloc.c	Thu Nov 28 07:49:25 2019	(r355169)
@@ -738,6 +738,7 @@ free_dbg(void **addrp, struct malloc_type *mtp)
 void
 free(void *addr, struct malloc_type *mtp)
 {
+	uma_zone_t zone;
 	uma_slab_t slab;
 	u_long size;
 
@@ -749,17 +750,17 @@ free(void *addr, struct malloc_type *mtp)
 	if (addr == NULL)
 		return;
 
-	slab = vtoslab((vm_offset_t)addr & (~UMA_SLAB_MASK));
+	vtozoneslab((vm_offset_t)addr & (~UMA_SLAB_MASK), &zone, &slab);
 	if (slab == NULL)
 		panic("free: address %p(%p) has not been allocated.\n",
 		    addr, (void *)((u_long)addr & (~UMA_SLAB_MASK)));
 
 	if (!(slab->us_flags & UMA_SLAB_MALLOC)) {
-		size = slab->us_keg->uk_size;
+		size = zone->uz_size;
 #ifdef INVARIANTS
 		free_save_type(addr, mtp, size);
 #endif
-		uma_zfree_arg(LIST_FIRST(&slab->us_keg->uk_zones), addr, slab);
+		uma_zfree_arg(zone, addr, slab);
 	} else {
 		size = slab->us_size;
 		uma_large_free(slab);
@@ -770,6 +771,7 @@ free(void *addr, struct malloc_type *mtp)
 void
 free_domain(void *addr, struct malloc_type *mtp)
 {
+	uma_zone_t zone;
 	uma_slab_t slab;
 	u_long size;
 
@@ -782,18 +784,17 @@ free_domain(void *addr, struct malloc_type *mtp)
 	if (addr == NULL)
 		return;
 
-	slab = vtoslab((vm_offset_t)addr & (~UMA_SLAB_MASK));
+	vtozoneslab((vm_offset_t)addr & (~UMA_SLAB_MASK), &zone, &slab);
 	if (slab == NULL)
 		panic("free_domain: address %p(%p) has not been allocated.\n",
 		    addr, (void *)((u_long)addr & (~UMA_SLAB_MASK)));
 
 	if (!(slab->us_flags & UMA_SLAB_MALLOC)) {
-		size = slab->us_keg->uk_size;
+		size = zone->uz_size;
 #ifdef INVARIANTS
 		free_save_type(addr, mtp, size);
 #endif
-		uma_zfree_domain(LIST_FIRST(&slab->us_keg->uk_zones),
-		    addr, slab);
+		uma_zfree_domain(zone, addr, slab);
 	} else {
 		size = slab->us_size;
 		uma_large_free(slab);
@@ -807,6 +808,7 @@ free_domain(void *addr, struct malloc_type *mtp)
 void *
 realloc(void *addr, size_t size, struct malloc_type *mtp, int flags)
 {
+	uma_zone_t zone;
 	uma_slab_t slab;
 	unsigned long alloc;
 	void *newaddr;
@@ -834,7 +836,7 @@ realloc(void *addr, size_t size, struct malloc_type *m
 	slab = NULL;
 	alloc = redzone_get_size(addr);
 #else
-	slab = vtoslab((vm_offset_t)addr & ~(UMA_SLAB_MASK));
+	vtozoneslab((vm_offset_t)addr & (~UMA_SLAB_MASK), &zone, &slab);
 
 	/* Sanity check */
 	KASSERT(slab != NULL,
@@ -842,7 +844,7 @@ realloc(void *addr, size_t size, struct malloc_type *m
 
 	/* Get the size of the original block */
 	if (!(slab->us_flags & UMA_SLAB_MALLOC))
-		alloc = slab->us_keg->uk_size;
+		alloc = zone->uz_size;
 	else
 		alloc = slab->us_size;
 

Modified: head/sys/vm/uma_core.c
==============================================================================
--- head/sys/vm/uma_core.c	Thu Nov 28 05:40:15 2019	(r355168)
+++ head/sys/vm/uma_core.c	Thu Nov 28 07:49:25 2019	(r355169)
@@ -1,7 +1,7 @@
 /*-
  * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
  *
- * Copyright (c) 2002-2005, 2009, 2013 Jeffrey Roberson <jeff@FreeBSD.org>
+ * Copyright (c) 2002-2019 Jeffrey Roberson <jeff@FreeBSD.org>
  * Copyright (c) 2004, 2005 Bosko Milekic <bmilekic@FreeBSD.org>
  * Copyright (c) 2004-2006 Robert N. M. Watson
  * All rights reserved.
@@ -275,7 +275,6 @@ static uma_bucket_t bucket_alloc(uma_zone_t zone, void
 static void bucket_free(uma_zone_t zone, uma_bucket_t, void *);
 static void bucket_zone_drain(void);
 static uma_bucket_t zone_alloc_bucket(uma_zone_t, void *, int, int);
-static uma_slab_t zone_fetch_slab(uma_zone_t, uma_keg_t, int, int);
 static void *slab_alloc_item(uma_keg_t keg, uma_slab_t slab);
 static void slab_free_item(uma_zone_t zone, uma_slab_t slab, void *item);
 static uma_keg_t uma_kcreate(uma_zone_t zone, size_t size, uma_init uminit,
@@ -1210,9 +1209,9 @@ keg_alloc_slab(uma_keg_t keg, uma_zone_t zone, int dom
 
 	if (keg->uk_flags & UMA_ZONE_VTOSLAB)
 		for (i = 0; i < keg->uk_ppera; i++)
-			vsetslab((vm_offset_t)mem + (i * PAGE_SIZE), slab);
+			vsetzoneslab((vm_offset_t)mem + (i * PAGE_SIZE),
+			    zone, slab);
 
-	slab->us_keg = keg;
 	slab->us_data = mem;
 	slab->us_freecount = keg->uk_ipers;
 	slab->us_flags = sflags;
@@ -3017,10 +3016,8 @@ restart:
 
 	for (;;) {
 		slab = keg_fetch_free_slab(keg, domain, rr, flags);
-		if (slab != NULL) {
-			MPASS(slab->us_keg == keg);
+		if (slab != NULL)
 			return (slab);
-		}
 
 		/*
 		 * M_NOVM means don't ask at all!
@@ -3039,7 +3036,6 @@ restart:
 		 * at least one item.
 		 */
 		if (slab) {
-			MPASS(slab->us_keg == keg);
 			dom = &keg->uk_domain[slab->us_domain];
 			LIST_INSERT_HEAD(&dom->ud_part_slab, slab, us_link);
 			return (slab);
@@ -3062,33 +3058,11 @@ restart:
 	 * fail.
 	 */
 	if ((slab = keg_fetch_free_slab(keg, domain, rr, flags)) != NULL) {
-		MPASS(slab->us_keg == keg);
 		return (slab);
 	}
 	return (NULL);
 }
 
-static uma_slab_t
-zone_fetch_slab(uma_zone_t zone, uma_keg_t keg, int domain, int flags)
-{
-	uma_slab_t slab;
-
-	if (keg == NULL) {
-		keg = zone->uz_keg;
-		KEG_LOCK(keg);
-	}
-
-	for (;;) {
-		slab = keg_fetch_slab(keg, zone, domain, flags);
-		if (slab)
-			return (slab);
-		if (flags & (M_NOWAIT | M_NOVM))
-			break;
-	}
-	KEG_UNLOCK(keg);
-	return (NULL);
-}
-
 static void *
 slab_alloc_item(uma_keg_t keg, uma_slab_t slab)
 {
@@ -3096,7 +3070,6 @@ slab_alloc_item(uma_keg_t keg, uma_slab_t slab)
 	void *item;
 	uint8_t freei;
 
-	MPASS(keg == slab->us_keg);
 	KEG_LOCK_ASSERT(keg);
 
 	freei = BIT_FFS(SLAB_SETSIZE, &slab->us_free) - 1;
@@ -3126,12 +3099,12 @@ zone_import(uma_zone_t zone, void **bucket, int max, i
 	int i;
 
 	slab = NULL;
-	keg = NULL;
+	keg = zone->uz_keg;
+	KEG_LOCK(keg);
 	/* Try to keep the buckets totally full */
 	for (i = 0; i < max; ) {
-		if ((slab = zone_fetch_slab(zone, keg, domain, flags)) == NULL)
+		if ((slab = keg_fetch_slab(keg, zone, domain, flags)) == NULL)
 			break;
-		keg = slab->us_keg;
 #ifdef NUMA
 		stripe = howmany(max, vm_ndomains);
 #endif
@@ -3157,8 +3130,7 @@ zone_import(uma_zone_t zone, void **bucket, int max, i
 		flags &= ~M_WAITOK;
 		flags |= M_NOWAIT;
 	}
-	if (slab != NULL)
-		KEG_UNLOCK(keg);
+	KEG_UNLOCK(keg);
 
 	return i;
 }
@@ -3599,7 +3571,6 @@ slab_free_item(uma_zone_t zone, uma_slab_t slab, void 
 	keg = zone->uz_keg;
 	MPASS(zone->uz_lockptr == &keg->uk_lock);
 	KEG_LOCK_ASSERT(keg);
-	MPASS(keg == slab->us_keg);
 
 	dom = &keg->uk_domain[slab->us_domain];
 
@@ -3642,10 +3613,8 @@ zone_release(uma_zone_t zone, void **bucket, int cnt)
 				mem += keg->uk_pgoff;
 				slab = (uma_slab_t)mem;
 			}
-		} else {
+		} else
 			slab = vtoslab((vm_offset_t)item);
-			MPASS(slab->us_keg == keg);
-		}
 		slab_free_item(zone, slab, item);
 	}
 	KEG_UNLOCK(keg);
@@ -3996,7 +3965,6 @@ uma_prealloc(uma_zone_t zone, int items)
 			slab = keg_alloc_slab(keg, zone, domain, M_WAITOK,
 			    aflags);
 			if (slab != NULL) {
-				MPASS(slab->us_keg == keg);
 				dom = &keg->uk_domain[slab->us_domain];
 				LIST_INSERT_HEAD(&dom->ud_free_slab, slab,
 				    us_link);
@@ -4134,7 +4102,7 @@ uma_large_malloc_domain(vm_size_t size, int domain, in
 	    DOMAINSET_FIXED(domain);
 	addr = kmem_malloc_domainset(policy, size, wait);
 	if (addr != 0) {
-		vsetslab(addr, slab);
+		vsetzoneslab(addr, NULL, slab);
 		slab->us_data = (void *)addr;
 		slab->us_flags = UMA_SLAB_KERNEL | UMA_SLAB_MALLOC;
 		slab->us_size = size;
@@ -4546,7 +4514,7 @@ uma_dbg_alloc(uma_zone_t zone, uma_slab_t slab, void *
 			panic("uma: item %p did not belong to zone %s\n",
 			    item, zone->uz_name);
 	}
-	keg = slab->us_keg;
+	keg = zone->uz_keg;
 	freei = ((uintptr_t)item - (uintptr_t)slab->us_data) / keg->uk_rsize;
 
 	if (BIT_ISSET(SLAB_SETSIZE, freei, &slab->us_debugfree))
@@ -4574,7 +4542,7 @@ uma_dbg_free(uma_zone_t zone, uma_slab_t slab, void *i
 			panic("uma: Freed item %p did not belong to zone %s\n",
 			    item, zone->uz_name);
 	}
-	keg = slab->us_keg;
+	keg = zone->uz_keg;
 	freei = ((uintptr_t)item - (uintptr_t)slab->us_data) / keg->uk_rsize;
 
 	if (freei >= keg->uk_ipers)

Modified: head/sys/vm/uma_int.h
==============================================================================
--- head/sys/vm/uma_int.h	Thu Nov 28 05:40:15 2019	(r355168)
+++ head/sys/vm/uma_int.h	Thu Nov 28 07:49:25 2019	(r355169)
@@ -1,7 +1,7 @@
 /*-
  * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
  *
- * Copyright (c) 2002-2005, 2009, 2013 Jeffrey Roberson <jeff@FreeBSD.org>
+ * Copyright (c) 2002-2019 Jeffrey Roberson <jeff@FreeBSD.org>
  * Copyright (c) 2004, 2005 Bosko Milekic <bmilekic@FreeBSD.org>
  * All rights reserved.
  *
@@ -281,7 +281,6 @@ BITSET_DEFINE(slabbits, SLAB_SETSIZE);
  * store and subdivides it into individually allocatable items.
  */
 struct uma_slab {
-	uma_keg_t	us_keg;			/* Keg we live in */
 	union {
 		LIST_ENTRY(uma_slab)	_us_link;	/* slabs in zone */
 		unsigned long	_us_size;	/* Size of allocation */
@@ -478,16 +477,27 @@ vtoslab(vm_offset_t va)
 	vm_page_t p;
 
 	p = PHYS_TO_VM_PAGE(pmap_kextract(va));
-	return ((uma_slab_t)p->plinks.s.pv);
+	return (p->plinks.uma.slab);
 }
 
 static __inline void
-vsetslab(vm_offset_t va, uma_slab_t slab)
+vtozoneslab(vm_offset_t va, uma_zone_t *zone, uma_slab_t *slab)
 {
 	vm_page_t p;
 
 	p = PHYS_TO_VM_PAGE(pmap_kextract(va));
-	p->plinks.s.pv = slab;
+	*slab = p->plinks.uma.slab;
+	*zone = p->plinks.uma.zone;
+}
+
+static __inline void
+vsetzoneslab(vm_offset_t va, uma_zone_t zone, uma_slab_t slab)
+{
+	vm_page_t p;
+
+	p = PHYS_TO_VM_PAGE(pmap_kextract(va));
+	p->plinks.uma.slab = slab;
+	p->plinks.uma.zone = zone;
 }
 
 /*

Modified: head/sys/vm/vm_page.h
==============================================================================
--- head/sys/vm/vm_page.h	Thu Nov 28 05:40:15 2019	(r355168)
+++ head/sys/vm/vm_page.h	Thu Nov 28 07:49:25 2019	(r355169)
@@ -220,12 +220,15 @@ struct vm_page {
 		TAILQ_ENTRY(vm_page) q; /* page queue or free list (Q) */
 		struct {
 			SLIST_ENTRY(vm_page) ss; /* private slists */
-			void *pv;
 		} s;
 		struct {
 			u_long p;
 			u_long v;
 		} memguard;
+		struct {
+			void *slab;
+			void *zone;
+		} uma;
 	} plinks;
 	TAILQ_ENTRY(vm_page) listq;	/* pages in same object (O) */
 	vm_object_t object;		/* which object am I in (O) */


