Date:      Wed, 17 May 2006 00:07:55 GMT
From:      Kip Macy <kmacy@FreeBSD.org>
To:        Perforce Change Reviews <perforce@freebsd.org>
Subject:   PERFORCE change 97311 for review
Message-ID:  <200605170007.k4H07tYU010598@repoman.freebsd.org>

http://perforce.freebsd.org/chv.cgi?CH=97311

Change 97311 by kmacy@kmacy_storage:sun4v_rwbuf on 2006/05/17 00:07:02

	reduce the amount of code covered by bucket locking - make sure that no
	memory accesses outside of direct-mapped memory occur while a bucket
	lock is held
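
In outline, every writer in the hunks below now follows the same discipline:
drop the bucket lock before anything that might touch non-direct-mapped
memory (here, allocating a fragment entry), retake it, and redo the lookup.
A condensed sketch assembled from the tte_hash_insert()/tte_hash_update()
hunks below (the re-lookup is needed because the bucket may have changed
while unlocked):

    s = hash_bucket_lock(fields);
    otte_data = tte_hash_lookup_inline(fields, va, TRUE);
    if (((tte_hash_field_t)PCPU_GET(lookup_field))->tte.tag != 0) {
        hash_bucket_unlock(fields, s);     /* drop the lock ... */
        newfield = tte_hash_allocate_fragment_entry(th); /* ... allocate outside it ... */
        s = hash_bucket_lock(fields);      /* ... then retake it ... */
        /* ... link newfield into the collision chain and redo the lookup */
        otte_data = tte_hash_lookup_inline(fields, va, TRUE);
    }
    tte_hash_set_field((tte_hash_field_t)PCPU_GET(lookup_field), tte_tag, tte_data);
    hash_bucket_unlock(fields, s);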

Affected files ...

.. //depot/projects/kmacy_sun4v/src/sys/sun4v/sun4v/tte_hash.c#27 edit

Differences ...

==== //depot/projects/kmacy_sun4v/src/sys/sun4v/sun4v/tte_hash.c#27 (text+ko) ====

@@ -142,24 +142,25 @@
 	uma_zfree(thzone, th);
 }
 
-static int
+#if 0
+static uint64_t
 hash_bucket_lock(tte_hash_field_t fields) 
 {
 	uint64_t data;
 	int s;
-	s = intr_disable_all();
+	s = intr_disable();
 
 	data = fields[0].tte.data & ~VTD_LOCK;
 	while (atomic_cmpset_long(&fields[0].tte.data, data, data | VTD_LOCK))
 		data = fields[0].tte.data & ~VTD_LOCK;
 
 	membar(Sync);
-	return s;
+	return (uint64_t)s;
 		
 }
 
-static __inline void
-hash_bucket_unlock_inline(tte_hash_field_t fields, int s) 
+static void
+hash_bucket_unlock(tte_hash_field_t fields, uint64_t s) 
 {
 
 	membar(StoreStore|LoadStore);
@@ -170,9 +171,15 @@
 
 	fields[0].tte.data &= ~VTD_LOCK;
 	membar(Sync);
-	intr_restore_all(s);
+	intr_restore(s);
 }
 
+#else 
+extern uint64_t hash_bucket_lock(tte_hash_field_t fields);
+
+extern void hash_bucket_unlock(tte_hash_field_t fields, uint64_t s);
+
+#endif
 void 
 tte_hash_init(void)
 {
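
The C implementations are now compiled out in favor of extern versions
(presumably assembler elsewhere in the branch). For reference, a minimal C
sketch of the locking scheme they replace, assuming FreeBSD's
atomic_cmpset_long() convention of returning non-zero on success; note the
retry condition here is deliberately the opposite of the #if 0 code above,
which exits its loop when the cmpset fails:

    static uint64_t
    hash_bucket_lock(tte_hash_field_t fields)
    {
        uint64_t data, s;

        s = intr_disable();    /* no interrupts while a bucket is held */
        for (;;) {
            data = fields[0].tte.data & ~VTD_LOCK;
            /* retry until we atomically set VTD_LOCK on entry 0 */
            if (atomic_cmpset_long(&fields[0].tte.data, data, data | VTD_LOCK))
                break;
        }
        membar(Sync);          /* acquire barrier */
        return (s);
    }

    static void
    hash_bucket_unlock(tte_hash_field_t fields, uint64_t s)
    {
        membar(StoreStore|LoadStore);     /* release barrier */
        fields[0].tte.data &= ~VTD_LOCK;  /* lock lives in entry 0's data word */
        membar(Sync);
        intr_restore(s);
    }
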
@@ -281,7 +288,7 @@
 }
 
 static tte_hash_field_t 
-tte_hash_allocate_fragment_entry(tte_hash_t th, tte_hash_field_t field)
+tte_hash_allocate_fragment_entry(tte_hash_t th)
 {
 	struct tte_hash_fragment *fh;
 	tte_hash_field_t newfield;
@@ -309,11 +316,8 @@
 	} 
 	newfield = fh->thf_entries[++fh->thf_head.fh_free_head].the_fields;
 	fh->thf_head.fh_count++; 
-	tte_hash_set_field(&newfield[0], field->tte.tag, field->tte.data); 
-	field->of.flags = TH_COLLISION;
-	field->of.next = newfield;
 
-	return (&newfield[1]);
+	return (&newfield[0]);
 }
 
 /*
@@ -327,18 +331,13 @@
 
 
 static __inline tte_t 
-tte_hash_lookup_inline(tte_hash_t th, vm_offset_t va, tte_hash_field_t *field)
+tte_hash_lookup_inline(tte_hash_field_t fields, vm_offset_t va, boolean_t setfield)
 {
-	uint64_t hash_shift, hash_index;
-	tte_hash_field_t fields;
 	int i;
 	tte_t entry;
 
 	/* XXX - only handle 8K pages for now */
 
-	hash_shift = PAGE_SHIFT;
-	hash_index = (va >> hash_shift) & HASH_MASK(th);
-	fields = (th->th_hashtable[hash_index].the_fields);
 	entry = 0;
 
 retry:
@@ -359,24 +358,18 @@
 	if (i >= HASH_ENTRIES)
 		panic("invalid state");
 
-	if (field) 
-		*field = &fields[i];
+	if (setfield == TRUE) 
+		PCPU_SET(lookup_field, (u_long)&fields[i]);
 
 	return (entry);
 }
 
 
 static __inline void
-tte_hash_lookup_last_inline(tte_hash_t th, vm_offset_t va, tte_hash_field_t *field)
+tte_hash_lookup_last_inline(tte_hash_field_t fields, vm_offset_t va)
 {
-	uint64_t hash_shift, hash_index;
-	tte_hash_field_t fields;
 	int i, index;
 	/* XXX - only handle 8K pages for now */
-
-	hash_shift = PAGE_SHIFT;
-	hash_index = (va >> hash_shift) & HASH_MASK(th);
-	fields = (th->th_hashtable[hash_index].the_fields);
 	index = -1;;
 	
 
@@ -388,7 +381,7 @@
 		}
 
 	if (index != -1) 
-		*field = &fields[index];
+		PCPU_SET(last_field, (u_long)&fields[index]);
 	else {
 		if (fields[(HASH_ENTRIES - 1)].of.flags & TH_COLLISION) {
 			if (fields[(HASH_ENTRIES - 1)].of.next[0].tte.tag != 0) {
@@ -396,13 +389,13 @@
 				goto retry;
 			} else {
 				/* 3rd entry is last */
-				*field = &fields[(HASH_ENTRIES - 2)];
+				PCPU_SET(last_field, (u_long)&fields[(HASH_ENTRIES - 2)]);
 				/* clear collision pointer */
 				tte_hash_set_field(&fields[(HASH_ENTRIES - 1)], 0, 0);
 
 			}
 		} else
-			*field = &fields[(HASH_ENTRIES - 1)]; /* last in bucket */
+			PCPU_SET(last_field, (u_long)&fields[(HASH_ENTRIES - 1)]); /* last in bucket */
 	}
 		
 }
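
Returning the lookup results through per-CPU variables instead of
output-pointer arguments is what lets the inlines run without extra memory
stores while the bucket is locked; the cost is a cast back from u_long at
each use. This presumes lookup_field and last_field were added to the sun4v
machine-dependent per-CPU (PCPU) fields elsewhere in this branch. A sketch
of the resulting calling convention:

    tte_hash_field_t field;
    tte_t tte_data;

    /* under the bucket lock: the inline publishes its result ... */
    tte_data = tte_hash_lookup_inline(fields, va, TRUE);
    /* ... and the caller reads it back, casting from u_long */
    field = (tte_hash_field_t)PCPU_GET(lookup_field);
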
@@ -410,10 +403,9 @@
 tte_t
 tte_hash_clear_bits(tte_hash_t th, vm_offset_t va, uint64_t flags)
 {
-	uint64_t hash_shift, hash_index;
-	tte_hash_field_t fields, lookup_field;
+	uint64_t hash_shift, hash_index, s;
+	tte_hash_field_t fields;
 	tte_t otte_data;
-	int s;
 
 	/* XXX - only handle 8K pages for now */
 	hash_shift = PAGE_SHIFT;
@@ -421,11 +413,12 @@
 	fields = (th->th_hashtable[hash_index].the_fields);
 
 	s = hash_bucket_lock(fields);
-	if((otte_data = tte_hash_lookup_inline(th, va, &lookup_field)) != 0)
-		tte_hash_set_field(lookup_field, lookup_field->tte.tag, 
-				   lookup_field->tte.data & ~flags);
+	if((otte_data = tte_hash_lookup_inline(fields, va, TRUE)) != 0)
+		tte_hash_set_field((tte_hash_field_t)PCPU_GET(lookup_field), 
+				   ((tte_hash_field_t)PCPU_GET(lookup_field))->tte.tag, 
+				   ((tte_hash_field_t)PCPU_GET(lookup_field))->tte.data & ~flags);
 
-	hash_bucket_unlock_inline(fields, s);
+	hash_bucket_unlock(fields, s);
 
 	return (otte_data);
 }
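
For illustration, a hypothetical pmap-layer caller that test-and-clears the
referenced bit; VTD_REF as the sun4v referenced bit and pm_hash as the
pmap's hash handle are assumed names, used only for the example:

    tte_t otte_data;
    int referenced;

    /* returns the old TTE data, or 0 if va had no mapping */
    otte_data = tte_hash_clear_bits(pm->pm_hash, va, VTD_REF);
    referenced = (otte_data != 0 && (otte_data & VTD_REF) != 0);
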
@@ -433,37 +426,41 @@
 tte_t
 tte_hash_delete(tte_hash_t th, vm_offset_t va)
 {
-	uint64_t hash_shift, hash_index;
-	tte_hash_field_t fields, lookup_field, last_field;
+	uint64_t hash_shift, hash_index, s;
+	tte_hash_field_t fields;
 	tte_t tte_data;
-	int s;
-	
 	/* XXX - only handle 8K pages for now */
 
 	hash_shift = PAGE_SHIFT;
 	hash_index = (va >> hash_shift) & HASH_MASK(th);
 	fields = (th->th_hashtable[hash_index].the_fields);
 
+
 	s  = hash_bucket_lock(fields);
 	
-	if ((tte_data = tte_hash_lookup_inline(th, va, &lookup_field)) == 0) 
+	if ((tte_data = tte_hash_lookup_inline(fields, va, TRUE)) == 0) 
 		goto done;
 
-	th->th_entries--;
-
-	tte_hash_lookup_last_inline(th, va, &last_field);
+	tte_hash_lookup_last_inline(fields, va);
 
 #ifdef DEBUG
-	if (last_field->tte.tag == 0)
+	if (((tte_hash_field_t)PCPU_GET(last_field))->tte.tag == 0) {
+		hash_bucket_unlock(fields, s);
 		panic("lookup_last failed for va=0x%lx\n", va);
+	}
 #endif
 	/* move last field's values in to the field we are deleting */
-	if (lookup_field != last_field) 
-		tte_hash_set_field(lookup_field, last_field->tte.tag, last_field->tte.data);
+	if (PCPU_GET(lookup_field) != PCPU_GET(last_field)) 
+		tte_hash_set_field((tte_hash_field_t)PCPU_GET(lookup_field), 
+				   ((tte_hash_field_t)PCPU_GET(last_field))->tte.tag, 
+				   ((tte_hash_field_t)PCPU_GET(last_field))->tte.data);
 	
-	tte_hash_set_field(last_field, 0, 0);
+	tte_hash_set_field((tte_hash_field_t)PCPU_GET(last_field), 0, 0);
 done:	
-	hash_bucket_unlock_inline(fields, s);
+	hash_bucket_unlock(fields, s);
+
+	if (tte_data)
+		th->th_entries--;
 
 	return (tte_data);
 }
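
The deletion itself is a bucket compaction: the last live entry overwrites
the victim's slot and is then zeroed, keeping each bucket's live entries
contiguous, and th_entries is now decremented only after the lock is dropped
(and only if something was actually removed). Schematically, with victim and
last standing in for the two PCPU_GET() results:

    if (victim != last)      /* move the last entry over the victim */
        tte_hash_set_field(victim, last->tte.tag, last->tte.data);
    tte_hash_set_field(last, 0, 0);    /* clear the vacated last slot */
    hash_bucket_unlock(fields, s);
    if (tte_data != 0)       /* deferred count update, outside the lock */
        th->th_entries--;
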
@@ -472,10 +469,9 @@
 tte_hash_insert(tte_hash_t th, vm_offset_t va, tte_t tte_data)
 {
 
-	uint64_t hash_shift, hash_index, tte_tag;
-	tte_hash_field_t fields, lookup_field;
+	uint64_t hash_shift, hash_index, tte_tag, s;
+	tte_hash_field_t fields, newfield;
 	tte_t otte_data;
-	int s;
 
 	/* XXX - only handle 8K pages for now */
 	hash_shift = PAGE_SHIFT;
@@ -485,29 +481,32 @@
 	tte_tag = (((uint64_t)th->th_context << TTARGET_CTX_SHIFT)|(va >> TTARGET_VA_SHIFT));
 
 	s = hash_bucket_lock(fields);
-	otte_data = tte_hash_lookup_inline(th, va, &lookup_field);
-	if (lookup_field->tte.tag != 0)
-		lookup_field = tte_hash_allocate_fragment_entry(th, lookup_field);
+	otte_data = tte_hash_lookup_inline(fields, va, TRUE);
+	if (((tte_hash_field_t)PCPU_GET(lookup_field))->tte.tag != 0) {
+		hash_bucket_unlock(fields, s);
+		newfield = tte_hash_allocate_fragment_entry(th); 
+		s = hash_bucket_lock(fields);
+		tte_hash_set_field(newfield, 
+				   ((tte_hash_field_t)PCPU_GET(lookup_field))->tte.tag,
+				   ((tte_hash_field_t)PCPU_GET(lookup_field))->tte.data);
+
+		((tte_hash_field_t)PCPU_GET(lookup_field))->of.flags = TH_COLLISION;
+		((tte_hash_field_t)PCPU_GET(lookup_field))->of.next = newfield;
+		otte_data = tte_hash_lookup_inline(fields, va, TRUE);
+	}
 	
 #ifdef DEBUG
 	if (otte_data)
 		panic("mapping for va=0x%lx already exists tte_data=0x%lx\n", va, otte_data);
 #endif
-	tte_hash_set_field(lookup_field, tte_tag, tte_data);
+	tte_hash_set_field((tte_hash_field_t)PCPU_GET(lookup_field), tte_tag, tte_data);
 
-	hash_bucket_unlock_inline(fields, s);
+	hash_bucket_unlock(fields, s);
 	th->th_entries++;
 }
 
 
 
-tte_t 
-tte_hash_lookup_nolock(tte_hash_t th, vm_offset_t va)
-{
-	return tte_hash_lookup_inline(th, va, NULL);
-}
-
-
 /* 
  * If leave_locked is true the tte's data field will be returned to
  * the caller with the hash bucket left locked
@@ -517,10 +516,9 @@
 tte_t 
 tte_hash_lookup(tte_hash_t th, vm_offset_t va)
 {
-	uint64_t hash_shift, hash_index;
+	uint64_t hash_shift, hash_index, s;
 	tte_hash_field_t fields;
 	tte_t tte_data;
-	int s;
 	/* XXX - only handle 8K pages for now */
 
 	hash_shift = PAGE_SHIFT;
@@ -528,8 +526,8 @@
 	fields = (th->th_hashtable[hash_index].the_fields);
 
 	s = hash_bucket_lock(fields);
-	tte_data = tte_hash_lookup_inline(th, va, NULL);
-	hash_bucket_unlock_inline(fields, s);
+	tte_data = tte_hash_lookup_inline(fields, va, FALSE);
+	hash_bucket_unlock(fields, s);
 	
 	return (tte_data);
 }
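
With tte_hash_lookup_nolock() removed above (the inline now requires the
caller to resolve the bucket first), external consumers go through this
locked wrapper. A hedged usage sketch, with pm_hash again an assumed name:

    /* returns the TTE data for va, or 0 if no mapping exists */
    tte_data = tte_hash_lookup(pm->pm_hash, va);
    if (tte_data == 0)
        return (ENOENT);    /* illustrative error path only */
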
@@ -541,7 +539,7 @@
 {
 	
 	uint64_t hash_scratch;
-	/* This will break if a hash table ever grows above 64MB
+	/* This will break if a hash table ever grows above 32MB
 	 * 2^(13+13)
 	 */
 	hash_scratch = ((vm_offset_t)th->th_hashtable) | ((vm_offset_t)th->th_size);
@@ -569,26 +567,36 @@
 tte_hash_update(tte_hash_t th, vm_offset_t va, tte_t tte_data)
 {
 	uint64_t hash_shift, hash_index;
-	tte_hash_field_t fields, lookup_field;
+	tte_hash_field_t fields, newfield;
 	tte_t otte_data;
-	uint64_t tag;
-	int s;
+	uint64_t tag, s;
 
 	/* XXX - only handle 8K pages for now */
 	hash_shift = PAGE_SHIFT;
 	hash_index = (va >> hash_shift) & HASH_MASK(th);
 	fields = (th->th_hashtable[hash_index].the_fields);
 
+	tag = (((uint64_t)th->th_context << TTARGET_CTX_SHIFT)|(va >> TTARGET_VA_SHIFT));
+
 	s = hash_bucket_lock(fields);
-	otte_data = tte_hash_lookup_inline(th, va, &lookup_field);
-	if (otte_data == 0 && lookup_field->tte.tag != 0)
-		lookup_field = tte_hash_allocate_fragment_entry(th, lookup_field);
+	otte_data = tte_hash_lookup_inline(fields, va, TRUE);
+
+	if (otte_data == 0 && ((tte_hash_field_t)PCPU_GET(lookup_field))->tte.tag != 0) {
+		hash_bucket_unlock(fields, s);
+		newfield = tte_hash_allocate_fragment_entry(th); 
+		s = hash_bucket_lock(fields);
+		tte_hash_set_field(newfield, 
+				   ((tte_hash_field_t)PCPU_GET(lookup_field))->tte.tag,
+				   ((tte_hash_field_t)PCPU_GET(lookup_field))->tte.data);
 
-	tag = (((uint64_t)th->th_context << TTARGET_CTX_SHIFT)|(va >> TTARGET_VA_SHIFT));
+		((tte_hash_field_t)PCPU_GET(lookup_field))->of.flags = TH_COLLISION;
+		((tte_hash_field_t)PCPU_GET(lookup_field))->of.next = newfield;
+		otte_data = tte_hash_lookup_inline(fields, va, TRUE);
+	}
 	
-	tte_hash_set_field(lookup_field, tag, tte_data);
+	tte_hash_set_field((tte_hash_field_t)PCPU_GET(lookup_field), tag, tte_data);
 
-	hash_bucket_unlock_inline(fields, s);
+	hash_bucket_unlock(fields, s);
 
 	if (otte_data == 0)
 		th->th_entries++;
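
tte_hash_update() now mirrors tte_hash_insert(): the fragment allocation
happens with the bucket unlocked, the lookup is redone after relocking, and
th_entries grows only when va had no prior mapping. Since update is
effectively insert-or-replace, a protection change can be sketched as
follows (VTD_SW_W as a software write-enable bit and pm_hash are assumed
names, for illustration only):

    tte_data = tte_hash_lookup(pm->pm_hash, va);
    if (tte_data != 0)      /* write-protect an existing mapping */
        tte_hash_update(pm->pm_hash, va, tte_data & ~VTD_SW_W);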


