Date:      Wed, 06 Jun 2012 16:37:05 +0000
From:      vbotton@FreeBSD.org
To:        svn-soc-all@FreeBSD.org
Subject:   socsvn commit: r237218 - soc2012/vbotton/ntfs_apple
Message-ID:  <20120606163705.E9AFC106566C@hub.freebsd.org>

Author: vbotton
Date: Wed Jun  6 16:37:05 2012
New Revision: 237218
URL: http://svnweb.FreeBSD.org/socsvn/?view=rev&rev=237218

Log:
  Convert the OS X lck_spin_*/lck_rw_* locking calls to FreeBSD mtx(9)
  spin locks and rwlock(9), and OSMalloc()/OSFree() to malloc()/free().

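For reference, a minimal sketch of the FreeBSD primitives this revision
switches to (rwlock(9) for ni->rl.lock and a spin mutex for ni->size_lock).
The lock initialization and the rw_try_upgrade() spelling are assumptions
about intended usage and are not part of this diff:

/*
 * Sketch only; not part of r237218.  Rough mapping of the XNU calls
 * replaced in the diff below onto their FreeBSD counterparts:
 *
 *   lck_rw_lock_shared()               -> rw_rlock()
 *   lck_rw_lock_exclusive()            -> rw_wlock()
 *   lck_rw_unlock_shared/_exclusive()  -> rw_unlock()
 *   lck_rw_lock_exclusive_to_shared()  -> rw_downgrade()
 *   lck_rw_lock_shared_to_exclusive()  -> rw_try_upgrade()
 *   lck_spin_lock()/lck_spin_unlock()  -> mtx_lock_spin()/mtx_unlock_spin()
 */
#include <sys/param.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/rwlock.h>

struct example_inode {			/* stands in for ntfs_inode */
	struct rwlock	rl_lock;	/* runlist lock (ni->rl.lock) */
	struct mtx	size_lock;	/* size lock (ni->size_lock) */
};

static void
example_init(struct example_inode *ni)
{
	/* Assumed initialization; the truncated diff does not show it. */
	rw_init(&ni->rl_lock, "ntfsrl");
	mtx_init(&ni->size_lock, "ntfssz", NULL, MTX_SPIN);
}

static void
example_locking(struct example_inode *ni)
{
	/* Short size reads and updates use the spin mutex. */
	mtx_lock_spin(&ni->size_lock);
	/* ... read or update the cached sizes ... */
	mtx_unlock_spin(&ni->size_lock);

	/* Runlist lookups take the rwlock shared and upgrade on demand. */
	rw_rlock(&ni->rl_lock);
	if (rw_try_upgrade(&ni->rl_lock) == 0) {
		/*
		 * Unlike lck_rw_lock_shared_to_exclusive(), a failed
		 * rw_try_upgrade() keeps the shared lock, so drop it
		 * before blocking on the write lock.
		 */
		rw_unlock(&ni->rl_lock);
		rw_wlock(&ni->rl_lock);
	}
	rw_downgrade(&ni->rl_lock);	/* back to shared */
	rw_unlock(&ni->rl_lock);
}

Note that mtx_lock_spin() requires a mutex created with MTX_SPIN; a default
MTX_DEF mutex with mtx_lock()/mtx_unlock() would be the more common choice
for a lock that is never taken from interrupt context, but the spin variant
matches the calls used in the diff.
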
Modified:
  soc2012/vbotton/ntfs_apple/ntfs_attr.c
  soc2012/vbotton/ntfs_apple/ntfs_attr_list.c
  soc2012/vbotton/ntfs_apple/ntfs_attr_list.h
  soc2012/vbotton/ntfs_apple/ntfs_compress.c
  soc2012/vbotton/ntfs_apple/ntfs_debug.c
  soc2012/vbotton/ntfs_apple/ntfs_dir.c
  soc2012/vbotton/ntfs_apple/ntfs_index.c
  soc2012/vbotton/ntfs_apple/ntfs_index.h
  soc2012/vbotton/ntfs_apple/ntfs_inode.c
  soc2012/vbotton/ntfs_apple/ntfs_lcnalloc.c
  soc2012/vbotton/ntfs_apple/ntfs_logfile.c
  soc2012/vbotton/ntfs_apple/ntfs_mft.c
  soc2012/vbotton/ntfs_apple/ntfs_page.c
  soc2012/vbotton/ntfs_apple/ntfs_quota.c
  soc2012/vbotton/ntfs_apple/ntfs_secure.c
  soc2012/vbotton/ntfs_apple/ntfs_usnjrnl.c
  soc2012/vbotton/ntfs_apple/ntfs_vfsops.c
  soc2012/vbotton/ntfs_apple/ntfs_vnops.c

Modified: soc2012/vbotton/ntfs_apple/ntfs_attr.c
==============================================================================
--- soc2012/vbotton/ntfs_apple/ntfs_attr.c	Wed Jun  6 15:29:27 2012	(r237217)
+++ soc2012/vbotton/ntfs_apple/ntfs_attr.c	Wed Jun  6 16:37:05 2012	(r237218)
@@ -38,15 +38,12 @@
 #include <sys/errno.h>
 #include <sys/stat.h>
 #include <sys/ucred.h>
-/*#include <sys/ubc.h>*/
+#include <sys/lock.h>
+#include <sys/rwlock.h>
+#include <sys/types.h>
+#include <sys/libkern.h>
 
-#include <string.h>
 
-/*#include <libkern/libkern.h>
-#include <libkern/OSMalloc.h>*/
-
-//#include <kern/debug.h>
-//#include <kern/sched_prim.h>
 
 #include "ntfs.h"
 #include "ntfs_attr.h"
@@ -100,7 +97,7 @@
 		ntfs_debug("Done (resident, nothing to do).");
 		return 0;
 	}
-	lck_rw_lock_exclusive(&ni->rl.lock);
+	rw_wlock(&ni->rl.lock);
 	/* Verify that the runlist is not mapped yet. */
 	if (ni->rl.alloc && ni->rl.elements)
 		panic("%s(): ni->rl.alloc && ni->rl.elements\n", __FUNCTION__);
@@ -165,7 +162,7 @@
 	ntfs_attr_search_ctx_put(ctx);
 	ntfs_mft_record_unmap(base_ni);
 err:
-	lck_rw_unlock_exclusive(&ni->rl.lock);
+	rw_unlock(&ni->rl.lock);
 	if (!err)
 		ntfs_debug("Done.");
 	else
@@ -260,10 +257,10 @@
 			panic("%s(): !a->non_resident\n", __FUNCTION__);
 		ctx_is_temporary = FALSE;
 		end_vcn = le64toh(a->highest_vcn);
-		lck_spin_lock(&ni->size_lock);
+		mtx_lock_spin(&ni->size_lock);
 		allocated_size_vcn = ni->allocated_size >>
 				ni->vol->cluster_size_shift;
-		lck_spin_unlock(&ni->size_lock);
+		mtx_unlock_spin(&ni->size_lock);
 		/*
 		 * If we already have the attribute extent containing @vcn in
 		 * @ctx, no need to look it up again.  We slightly cheat in
@@ -449,13 +446,13 @@
 		panic("%s(): vcn < 0\n", __FUNCTION__);
 retry_remap:
 	if (!ni->rl.elements) {
-		lck_spin_lock(&ni->size_lock);
+		mtx_lock_spin(&ni->size_lock);
 		if (!ni->allocated_size) {
-			lck_spin_unlock(&ni->size_lock);
+			mtx_unlock_spin(&ni->size_lock);
 			lcn = LCN_ENOENT;
 			goto lcn_enoent;
 		}
-		lck_spin_unlock(&ni->size_lock);
+		mtx_unlock_spin(&ni->size_lock);
 		if (!is_retry)
 			goto try_to_map;
 		lcn = LCN_EIO;
@@ -465,7 +462,7 @@
 	lcn = ntfs_rl_vcn_to_lcn(ni->rl.rl, vcn, clusters);
 	if (lcn >= LCN_HOLE) {
 		if (need_lock_switch)
-			lck_rw_lock_exclusive_to_shared(&ni->rl.lock);
+			rw_downgrade(&ni->rl.lock);
 		ntfs_debug("Done (lcn 0x%llx, clusters 0x%llx).",
 				(unsigned long long)lcn,
 				clusters ? (unsigned long long)*clusters : 0);
@@ -485,8 +482,8 @@
 			 * fails, need to take the lock for writing and retry
 			 * in case the racing process did the mapping for us.
 			 */
-			if (!lck_rw_lock_shared_to_exclusive(&ni->rl.lock)) {
-				lck_rw_lock_exclusive(&ni->rl.lock);
+			if (!rw_upgrade(&ni->rl.lock)) {
+				rw_wlock(&ni->rl.lock);
 				goto retry_remap;
 			}
 		}
@@ -508,7 +505,7 @@
 	}
 lcn_eio:
 	if (need_lock_switch)
-		lck_rw_lock_exclusive_to_shared(&ni->rl.lock);
+		rw_downgrade(&ni->rl.lock);
 	if (lcn == LCN_ENOENT) {
 lcn_enoent:
 		ntfs_debug("Done (LCN_ENOENT).");
@@ -593,12 +590,12 @@
 		panic("%s(): vcn < 0\n", __FUNCTION__);
 retry_remap:
 	if (!ni->rl.elements) {
-		lck_spin_lock(&ni->size_lock);
+		mtx_lock_spin(&ni->size_lock);
 		if (!ni->allocated_size) {
-			lck_spin_unlock(&ni->size_lock);
+			mtx_unlock_spin(&ni->size_lock);
 			return LCN_ENOENT;
 		}
-		lck_spin_unlock(&ni->size_lock);
+		mtx_unlock_spin(&ni->size_lock);
 		if (!is_retry)
 			goto try_to_map;
 		err = EIO;
@@ -705,7 +702,7 @@
 {
 	ntfs_attr_search_ctx *ctx;
 
-	ctx = OSMalloc(sizeof(ntfs_attr_search_ctx), ntfs_malloc_tag);
+	ctx = malloc(sizeof(ntfs_attr_search_ctx));
 	if (ctx)
 		ntfs_attr_search_ctx_init(ctx, ni, m);
 	return ctx;
@@ -722,7 +719,7 @@
 {
 	if (ctx->base_ni && ctx->ni != ctx->base_ni)
 		ntfs_extent_mft_record_unmap(ctx->ni);
-	OSFree(ctx, sizeof(ntfs_attr_search_ctx), ntfs_malloc_tag);
+	free(ctx);
 }
 
 /**
@@ -2087,9 +2084,9 @@
 			goto restart_compressed_size_add;
 		}
 		/* Move the attribute to an extent mft record. */
-		lck_rw_lock_shared(&base_ni->attr_list_rl.lock);
+		rw_rlock(&base_ni->attr_list_rl.lock);
 		err = ntfs_attr_record_move(ctx);
-		lck_rw_unlock_shared(&base_ni->attr_list_rl.lock);
+		rw_unlock(&base_ni->attr_list_rl.lock);
 		if (err) {
 			ntfs_error(vol->mp, "Failed to move attribute extent "
 					"from mft record 0x%llx to an extent "
@@ -2229,13 +2226,13 @@
 	 * compressed sizes is sufficient in which case we can save a few CPU
 	 * cycles by not updating the data and initialized sizes here.
 	 */
-	lck_spin_lock(&ni->size_lock);
+	mtx_lock_spin(&ni->size_lock);
 	a->allocated_size = cpu_to_sle64(ni->allocated_size);
 	a->data_size = cpu_to_sle64(ni->data_size);
 	a->initialized_size = cpu_to_sle64(ni->initialized_size);
 	if (a->flags & (ATTR_IS_COMPRESSED | ATTR_IS_SPARSE))
 		a->compressed_size = cpu_to_sle64(ni->compressed_size);
-	lck_spin_unlock(&ni->size_lock);
+	mtx_unlock_spin(&ni->size_lock);
 	/*
 	 * If the current mapping pairs array is valid and the first vcn at
 	 * which we need to update the mapping pairs array is not in this
@@ -2966,12 +2963,12 @@
 	 * The size needs to be aligned to a cluster boundary for allocation
 	 * purposes.
 	 */
-	lck_spin_lock(&ni->size_lock);
+	mtx_lock_spin(&ni->size_lock);
 	data_size = ni->data_size;
-	lck_spin_unlock(&ni->size_lock);
+	mtx_unlock_spin(&ni->size_lock);
 	new_size = (data_size + vol->cluster_size_mask) &
 			~vol->cluster_size_mask;
-	lck_rw_lock_exclusive(&ni->rl.lock);
+	rw_wlock(&ni->rl.lock);
 	if (ni->rl.elements)
 		panic("%s(): ni->rl.elements\n", __FUNCTION__);
 	upl = NULL;
@@ -3169,9 +3166,9 @@
 	type = ni->type;
 	if (type != AT_STANDARD_INFORMATION && type != AT_INDEX_ROOT &&
 			(type != AT_DATA || ni->name_len)) {
-		lck_rw_lock_shared(&base_ni->attr_list_rl.lock);
+		rw_rlock(&base_ni->attr_list_rl.lock);
 		err = ntfs_attr_record_move(&ctx);
-		lck_rw_unlock_shared(&base_ni->attr_list_rl.lock);
+		rw_unlock(&base_ni->attr_list_rl.lock);
 		if (!err) {
 			/* The attribute has moved so update our variables. */
 			m = ctx.m;
@@ -3441,7 +3438,7 @@
 	if (err)
 		panic("%s(): err\n", __FUNCTION__);
 	/* Setup the in-memory attribute structure to be non-resident. */
-	lck_spin_lock(&ni->size_lock);
+	mtx_lock_spin(&ni->size_lock);
 	ni->allocated_size = new_size;
 	if (NInoSparse(ni) || NInoCompressed(ni)) {
 		ni->compressed_size = ni->allocated_size;
@@ -3459,7 +3456,7 @@
 			ni->compression_block_clusters = 0;
 		}
 	}
-	lck_spin_unlock(&ni->size_lock);
+	mtx_unlock_spin(&ni->size_lock);
 	/*
 	 * This needs to be last since we are not allowed to fail once we flip
 	 * this switch.
@@ -3485,7 +3482,7 @@
 		NInoSetMrecNeedsDirtying(base_ni);
 	}
 	ntfs_mft_record_unmap(base_ni);
-	lck_rw_unlock_exclusive(&ni->rl.lock);
+	rw_unlock(&ni->rl.lock);
 	/*
 	 * We have modified the allocated size.  If the ntfs inode is the base
 	 * inode, cause the sizes to be written to all the directory index
@@ -3545,7 +3542,7 @@
 			panic("%s(): err2\n", __FUNCTION__);
 	}
 unl_err:
-	lck_rw_unlock_exclusive(&ni->rl.lock);
+	rw_unlock(&ni->rl.lock);
 	if (err == EINVAL)
 		err = EIO;
 	return err;
@@ -3922,7 +3919,7 @@
 	BOOL data_size_updated = FALSE;
 
 #ifdef DEBUG
-	lck_spin_lock(&ni->size_lock);
+	mtx_lock_spin(&ni->size_lock);
 	ntfs_debug("Entering for mft_no 0x%llx, attribute type 0x%x, old data "
 			"size 0x%llx, old initialized size 0x%llx, new "
 			"initialized size 0x%llx.",
@@ -3931,7 +3928,7 @@
 			(unsigned long long)ni->data_size,
 			(unsigned long long)ni->initialized_size,
 			(unsigned long long)new_init_size);
-	lck_spin_unlock(&ni->size_lock);
+	mtx_unlock_spin(&ni->size_lock);
 #endif /* DEBUG */
 	base_ni = ni;
 	if (NInoAttr(ni))
@@ -3953,7 +3950,7 @@
 		goto put_err;
 	}
 	a = ctx->a;
-	lck_spin_lock(&ni->size_lock);
+	mtx_lock_spin(&ni->size_lock);
 	if (new_init_size >= 0) {
 		if (new_init_size < ni->initialized_size)
 			panic("%s(): new_init_size < ni->initialized_size\n",
@@ -4001,17 +3998,17 @@
 			panic("%s(): !NInoNonResident(ni)\n", __FUNCTION__);
 		a->initialized_size = cpu_to_sle64(new_init_size);
 	}
-	lck_spin_unlock(&ni->size_lock);
+	mtx_unlock_spin(&ni->size_lock);
 	/*
 	 * If this is a directory B+tree index allocation attribute also update
 	 * the sizes in the base inode.
 	 */
 	if (ni->name == I30 && ni->type == AT_INDEX_ALLOCATION) {
-		lck_spin_lock(&base_ni->size_lock);
+		mtx_lock_spin(&base_ni->size_lock);
 		if (data_size_updated)
 			base_ni->data_size = new_init_size;
 		base_ni->initialized_size = new_init_size;
-		lck_spin_unlock(&base_ni->size_lock);
+		mtx_unlock_spin(&base_ni->size_lock);
 	}
 	/* Mark the mft record dirty to ensure it gets written out. */
 	NInoSetMrecNeedsDirtying(ctx->ni);
@@ -4083,13 +4080,13 @@
 	unsigned attr_len;
 	BOOL locked, write_locked, is_sparse, mark_sizes_dirty;
 
-	lck_spin_lock(&ni->size_lock);
+	mtx_lock_spin(&ni->size_lock);
 	if (new_init_size > ni->allocated_size)
 		panic("%s(): new_init_size > ni->allocated_size\n",
 				__FUNCTION__);
 	size = ni->data_size;
 	old_init_size = ni->initialized_size;
-	lck_spin_unlock(&ni->size_lock);
+	mtx_unlock_spin(&ni->size_lock);
 	if (new_init_size <= old_init_size)
 		panic("%s(): new_init_size <= old_init_size\n",
 				__FUNCTION__);
@@ -4140,9 +4137,9 @@
 	bzero(kattr + attr_len, new_init_size - attr_len);
 	a->value_length = cpu_to_le32((u32)new_init_size);
 	/* Update the sizes in the ntfs inode as well as the ubc size. */
-	lck_spin_lock(&ni->size_lock);
+	mtx_lock_spin(&ni->size_lock);
 	ni->initialized_size = ni->data_size = size = new_init_size;
-	lck_spin_unlock(&ni->size_lock);
+	mtx_unlock_spin(&ni->size_lock);
 	/* Mark the mft record dirty to ensure it gets written out. */
 	NInoSetMrecNeedsDirtying(ctx->ni);
 	ntfs_attr_search_ctx_put(ctx);
@@ -4179,9 +4176,9 @@
 			panic("%s(): size != le64toh(a->data_size)\n",
 					__FUNCTION__);
 		size = new_init_size;
-		lck_spin_lock(&ni->size_lock);
+		mtx_lock_spin(&ni->size_lock);
 		ni->data_size = new_init_size;
-		lck_spin_unlock(&ni->size_lock);
+		mtx_unlock_spin(&ni->size_lock);
 		a->data_size = cpu_to_sle64(new_init_size);
 		/* Mark the mft record dirty to ensure it gets written out. */
 		NInoSetMrecNeedsDirtying(ctx->ni);
@@ -4220,7 +4217,7 @@
 		BOOL have_holes = FALSE;
 
 		locked = TRUE;
-		lck_rw_lock_shared(&ni->rl.lock);
+		rw_rlock(&ni->rl.lock);
 		vcn = ofs >> vol->cluster_size_shift;
 		end_vcn = (new_init_size + vol->cluster_size_mask) >>
 				vol->cluster_size_shift;
@@ -4230,9 +4227,9 @@
 map_vcn:
 			if (!write_locked) {
 				write_locked = TRUE;
-				if (!lck_rw_lock_shared_to_exclusive(
+				if (!rw_upgrade(
 						&ni->rl.lock)) {
-					lck_rw_lock_exclusive(&ni->rl.lock);
+					rw_wlock(&ni->rl.lock);
 					goto retry_remap;
 				}
 			}
@@ -4287,9 +4284,9 @@
 					if (ofs > old_init_size) {
 						if (ofs > new_init_size)
 							ofs = new_init_size;
-						lck_spin_lock(&ni->size_lock);
+						mtx_lock_spin(&ni->size_lock);
 						ni->initialized_size = ofs;
-						lck_spin_unlock(&ni->size_lock);
+						mtx_unlock_spin(&ni->size_lock);
 						if (ofs == new_init_size)
 							goto update_done;
 					}
@@ -4308,7 +4305,7 @@
 		 */
 		if (have_holes) {
 			if (write_locked) {
-				lck_rw_lock_exclusive_to_shared(&ni->rl.lock);
+				rw_downgrade(&ni->rl.lock);
 				write_locked = FALSE;
 			}
 			/*
@@ -4322,9 +4319,9 @@
 			rl = ni->rl.rl;
 		} else {
 			if (write_locked)
-				lck_rw_unlock_exclusive(&ni->rl.lock);
+				rw_unlock(&ni->rl.lock);
 			else
-				lck_rw_unlock_shared(&ni->rl.lock);
+				rw_unlock(&ni->rl.lock);
 			locked = FALSE;
 			is_sparse = FALSE;
 		}
@@ -4399,9 +4396,9 @@
 			 */
 			if (ofs > new_init_size)
 				ofs = new_init_size;
-			lck_spin_lock(&ni->size_lock);
+			mtx_lock_spin(&ni->size_lock);
 			ni->initialized_size = ofs;
-			lck_spin_unlock(&ni->size_lock);
+			mtx_unlock_spin(&ni->size_lock);
 		} else /* if (!is_sparse) */ {
 			upl_t upl;
 			upl_page_info_array_t pl;
@@ -4427,7 +4424,7 @@
 // not.  Or perhaps just remove the warning and use this as the solution.
 			if (locked && write_locked) {
 				write_locked = FALSE;
-				lck_rw_lock_exclusive_to_shared(&ni->rl.lock);
+				rw_downgrade(&ni->rl.lock);
 				ntfs_warning(vol->mp, "Switching runlist lock "
 						"to shared to avoid "
 						"deadlock.");
@@ -4442,25 +4439,25 @@
 			ofs += PAGE_SIZE;
 			if (ofs > new_init_size)
 				ofs = new_init_size;
-			lck_spin_lock(&ni->size_lock);
+			mtx_lock_spin(&ni->size_lock);
 			ni->initialized_size = ofs;
-			lck_spin_unlock(&ni->size_lock);
+			mtx_unlock_spin(&ni->size_lock);
 			/* Set the page dirty so it gets written out. */
 			ntfs_page_unmap(ni, upl, pl, TRUE);
 		}
 	} while (ofs < new_init_size);
-	lck_spin_lock(&ni->size_lock);
+	mtx_lock_spin(&ni->size_lock);
 	if (ni->initialized_size != new_init_size)
 		panic("%s(): ni->initialized_size != new_init_size\n",
 				__FUNCTION__);
-	lck_spin_unlock(&ni->size_lock);
+	mtx_unlock_spin(&ni->size_lock);
 update_done:
 	/* If we are holding the runlist lock, release it now. */
 	if (locked) {
 		if (write_locked)
-			lck_rw_unlock_exclusive(&ni->rl.lock);
+			rw_unlock(&ni->rl.lock);
 		else
-			lck_rw_unlock_shared(&ni->rl.lock);
+			rw_unlock(&ni->rl.lock);
 		locked = FALSE;
 	}
 	/* Bring up to date the initialized_size in the attribute record. */
@@ -4486,13 +4483,13 @@
 unl_err:
 	if (locked) {
 		if (write_locked)
-			lck_rw_unlock_exclusive(&ni->rl.lock);
+			rw_unlock(&ni->rl.lock);
 		else
-			lck_rw_unlock_shared(&ni->rl.lock);
+			rw_unlock(&ni->rl.lock);
 	}
-	lck_spin_lock(&ni->size_lock);
+	mtx_lock_spin(&ni->size_lock);
 	ni->initialized_size = old_init_size;
-	lck_spin_unlock(&ni->size_lock);
+	mtx_unlock_spin(&ni->size_lock);
 	goto err;
 put_err:
 	ntfs_attr_search_ctx_put(ctx);
@@ -4637,9 +4634,9 @@
 	 * enough space to add the compressed size to the attribute record.
 	 */
 	if (!ntfs_attr_record_is_only_one(m, a)) {
-		lck_rw_lock_shared(&base_ni->attr_list_rl.lock);
+		rw_rlock(&base_ni->attr_list_rl.lock);
 		err = ntfs_attr_record_move(ctx);
-		lck_rw_unlock_shared(&base_ni->attr_list_rl.lock);
+		rw_unlock(&base_ni->attr_list_rl.lock);
 		if (err) {
 			ntfs_error(vol->mp, "Failed to move attribute extent "
 					"from mft record 0x%llx to an extent "
@@ -4738,10 +4735,10 @@
 				ffs(ni->compression_block_size) - 1;
 		ni->compression_block_clusters = 1U << NTFS_COMPRESSION_UNIT;
 	}
-	lck_spin_lock(&ni->size_lock);
+	mtx_lock_spin(&ni->size_lock);
 	ni->compressed_size = ni->allocated_size;
 	a->compressed_size = a->allocated_size;
-	lck_spin_unlock(&ni->size_lock);
+	mtx_unlock_spin(&ni->size_lock);
 is_compressed:
 	/* Mark both the attribute and the ntfs inode as sparse. */
 	a->flags |= ATTR_IS_SPARSE;
@@ -5111,9 +5108,9 @@
 				sizeof(a->compressed_size));
 		/* Set the compression unit to 0. */
 		a->compression_unit = 0;
-		lck_spin_lock(&ni->size_lock);
+		mtx_lock_spin(&ni->size_lock);
 		ni->compressed_size = 0;
-		lck_spin_unlock(&ni->size_lock);
+		mtx_unlock_spin(&ni->size_lock);
 		/* Clear the other related fields. */
 		ni->compression_block_size = 0;
 		ni->compression_block_clusters =
@@ -5219,7 +5216,7 @@
 		base_ni = ni->base_ni;
 	if (!new_end)
 		atomic = TRUE;
-	lck_rw_lock_shared(&ni->rl.lock);
+	rw_rlock(&ni->rl.lock);
 	write_locked = FALSE;
 	/*
 	 * We have to round down @start to the nearest page boundary and we
@@ -5234,11 +5231,11 @@
 	end_vcn = ((end + PAGE_MASK) & ~PAGE_MASK_64) >>
 			vol->cluster_size_shift;
 	/* Cache the sizes for the attribute so we take the size lock once. */
-	lck_spin_lock(&ni->size_lock);
+	mtx_lock_spin(&ni->size_lock);
 	allocated_size = ni->allocated_size;
 	initialized_size = ni->initialized_size;
 	compressed_size = ni->compressed_size;
-	lck_spin_unlock(&ni->size_lock);
+	mtx_unlock_spin(&ni->size_lock);
 	/*
 	 * We have to make sure that we stay within the existing allocated
 	 * size when instantiating holes as it would corrupt the attribute if
@@ -5254,8 +5251,8 @@
 map_vcn:
 		if (!write_locked) {
 			write_locked = TRUE;
-			if (!lck_rw_lock_shared_to_exclusive(&ni->rl.lock)) {
-				lck_rw_lock_exclusive(&ni->rl.lock);
+			if (!rw_upgrade(&ni->rl.lock)) {
+				rw_wlock(&ni->rl.lock);
 				goto retry_remap;
 			}
 		}
@@ -5317,8 +5314,8 @@
 		 */
 		if (!write_locked) {
 			write_locked = TRUE;
-			if (!lck_rw_lock_shared_to_exclusive(&ni->rl.lock)) {
-				lck_rw_lock_exclusive(&ni->rl.lock);
+			if (!rw_upgrade(&ni->rl.lock)) {
+				rw_wlock(&ni->rl.lock);
 				goto retry_remap;
 			}
 		}
@@ -5510,10 +5507,10 @@
 		 * update the compressed size.
 		 */
 		if (NInoSparse(ni) || NInoCompressed(ni)) {
-			lck_spin_lock(&ni->size_lock);
+			mtx_lock_spin(&ni->size_lock);
 			ni->compressed_size = compressed_size;
 			a->compressed_size = cpu_to_sle64(compressed_size);
-			lck_spin_unlock(&ni->size_lock);
+			mtx_unlock_spin(&ni->size_lock);
 		}
 		/*
 		 * If this is the unnamed $DATA attribute also need to update
@@ -5619,9 +5616,9 @@
 		 * number of extent attribute records needed to a minimum.
 		 */
 		if (!ntfs_attr_record_is_only_one(m, a)) {
-			lck_rw_lock_shared(&base_ni->attr_list_rl.lock);
+			rw_rlock(&base_ni->attr_list_rl.lock);
 			err = ntfs_attr_record_move(ctx);
-			lck_rw_unlock_shared(&base_ni->attr_list_rl.lock);
+			rw_unlock(&base_ni->attr_list_rl.lock);
 			if (err) {
 				ntfs_error(vol->mp, "Failed to move attribute "
 						"extent from mft record "
@@ -5769,9 +5766,9 @@
 	if (new_end)
 		*new_end = vcn << vol->cluster_size_shift;
 	if (write_locked)
-		lck_rw_unlock_exclusive(&ni->rl.lock);
+		rw_unlock(&ni->rl.lock);
 	else
-		lck_rw_unlock_shared(&ni->rl.lock);
+		rw_unlock(&ni->rl.lock);
 	return err;
 undo_alloc:
 	err2 = ntfs_cluster_free_from_rl(vol, runlist.rl, 0, -1, NULL);
@@ -5826,10 +5823,10 @@
 	}
 	/* Restore the compressed size to the old value. */
 	compressed_size -= len << vol->cluster_size_shift;
-	lck_spin_lock(&ni->size_lock);
+	mtx_lock_spin(&ni->size_lock);
 	ni->compressed_size = compressed_size;
 	a->compressed_size = cpu_to_sle64(compressed_size);
-	lck_spin_unlock(&ni->size_lock);
+	mtx_unlock_spin(&ni->size_lock);
 	/* Ensure the modified mft record is written out. */
 	NInoSetMrecNeedsDirtying(ctx->ni);
 	if (ni == base_ni)
@@ -5961,9 +5958,9 @@
 
 	start = data_start;
 #ifdef DEBUG
-	lck_spin_lock(&ni->size_lock);
+	mtx_lock_spin(&ni->size_lock);
 	old_alloc_size = ni->allocated_size;
-	lck_spin_unlock(&ni->size_lock);
+	mtx_unlock_spin(&ni->size_lock);
 	ntfs_debug("Entering for mft_no 0x%llx, attribute type 0x%x, "
 			"old_allocated_size 0x%llx, "
 			"new_allocated_size 0x%llx, new_data_size 0x%llx, "
@@ -6000,9 +5997,9 @@
 	err = ntfs_attr_size_bounds_check(vol, ni->type, new_alloc_size);
 	if (err) {
 		/* Only emit errors when the write will fail completely. */
-		lck_spin_lock(&ni->size_lock);
+		mtx_lock_spin(&ni->size_lock);
 		old_alloc_size = ni->allocated_size;
-		lck_spin_unlock(&ni->size_lock);
+		mtx_unlock_spin(&ni->size_lock);
 		if (start < 0 || start >= old_alloc_size) {
 			if (err == ERANGE) {
 				ntfs_error(vol->mp, "Cannot extend allocation "
@@ -6038,7 +6035,7 @@
 	 * We will be modifying both the runlist (if non-resident) and the mft
 	 * record so lock them both down.
 	 */
-	lck_rw_lock_exclusive(&ni->rl.lock);
+	rw_wlock(&ni->rl.lock);
 	err = ntfs_mft_record_map(base_ni, &base_m);
 	if (err) {
 		base_m = NULL;
@@ -6050,9 +6047,9 @@
 		err = ENOMEM;
 		goto err_out;
 	}
-	lck_spin_lock(&ni->size_lock);
+	mtx_lock_spin(&ni->size_lock);
 	alloc_size = ni->allocated_size;
-	lck_spin_unlock(&ni->size_lock);
+	mtx_unlock_spin(&ni->size_lock);
 	/*
 	 * If non-resident, seek to the last extent.  If resident, there is
 	 * only one extent, so seek to that.
@@ -6116,11 +6113,11 @@
 				 * This cannot fail as it is a shrinking
 				 * resize.
 				 */
-				lck_spin_lock(&ni->size_lock);
+				mtx_lock_spin(&ni->size_lock);
 				err = ntfs_attr_record_resize(m, a,
 						le16toh(a->value_offset) +
 						ni->allocated_size);
-				lck_spin_unlock(&ni->size_lock);
+				mtx_unlock_spin(&ni->size_lock);
 				if (err)
 					panic("%s(): Failed to shrink "
 							"resident attribute "
@@ -6132,14 +6129,14 @@
 			/* Zero the extended attribute value. */
 			bzero((u8*)a + le16toh(a->value_offset) + attr_len,
 					(u32)new_data_size - attr_len);
-			lck_spin_lock(&ni->size_lock);
+			mtx_lock_spin(&ni->size_lock);
 			ni->initialized_size = ni->data_size = new_data_size;
 			a->value_length = cpu_to_le32((u32)new_data_size);
 		} else
-			lck_spin_lock(&ni->size_lock);
+			mtx_lock_spin(&ni->size_lock);
 		ni->allocated_size = le32toh(a->length) -
 				le16toh(a->value_offset);
-		lck_spin_unlock(&ni->size_lock);
+		mtx_unlock_spin(&ni->size_lock);
 		if (new_data_size > attr_len)
 			a->value_length = cpu_to_le32((u32)new_data_size);
 		goto dirty_done;
@@ -6150,7 +6147,7 @@
 	 */
 	ntfs_attr_search_ctx_put(actx);
 	ntfs_mft_record_unmap(base_ni);
-	lck_rw_unlock_exclusive(&ni->rl.lock);
+	rw_unlock(&ni->rl.lock);
 	/*
 	 * Not enough space in the mft record, try to make the attribute
 	 * non-resident and if successful restart the extension process.
@@ -6170,9 +6167,9 @@
 			 * Only emit errors when the write will fail
 			 * completely.
 			 */
-			lck_spin_lock(&ni->size_lock);
+			mtx_lock_spin(&ni->size_lock);
 			old_alloc_size = ni->allocated_size;
-			lck_spin_unlock(&ni->size_lock);
+			mtx_unlock_spin(&ni->size_lock);
 			if (start < 0 || start >= old_alloc_size)
 				ntfs_error(vol->mp, "Cannot extend allocation "
 						"of mft_no 0x%llx, attribute "
@@ -6221,9 +6218,9 @@
 	 */
 	if (arec_size > vol->mft_record_size - sizeof(MFT_RECORD)) {
 		/* Only emit errors when the write will fail completely. */
-		lck_spin_lock(&ni->size_lock);
+		mtx_lock_spin(&ni->size_lock);
 		old_alloc_size = ni->allocated_size;
-		lck_spin_unlock(&ni->size_lock);
+		mtx_unlock_spin(&ni->size_lock);
 		if (start < 0 || start >= old_alloc_size)
 			ntfs_error(vol->mp, "Cannot extend allocation of "
 					"mft_no 0x%llx, attribute type 0x%x, "
@@ -6625,7 +6622,7 @@
 			NVolSetErrors(vol);
 			goto err_out;
 		}
-		lck_rw_unlock_exclusive(&ni->rl.lock);
+		rw_unlock(&ni->rl.lock);
 		/* Find the index root by walking up the tree path. */
 		root_ictx = ictx;
 		while (!root_ictx->is_root) {
@@ -6793,9 +6790,9 @@
 		 */
 		if (!ntfs_attr_record_is_only_one(m, a)) {
 move_attr:
-			lck_rw_lock_shared(&base_ni->attr_list_rl.lock);
+			rw_rlock(&base_ni->attr_list_rl.lock);
 			err = ntfs_attr_record_move(actx);
-			lck_rw_unlock_shared(&base_ni->attr_list_rl.lock);
+			rw_unlock(&base_ni->attr_list_rl.lock);
 			if (err) {
 				if (start < 0 || start >= alloc_size)
 					ntfs_error(vol->mp, "Failed to move "
@@ -7333,7 +7330,7 @@
 		 */
 		a = actx->a;
 	}
-	lck_spin_lock(&ni->size_lock);
+	mtx_lock_spin(&ni->size_lock);
 	ni->allocated_size = new_alloc_size;
 	a->allocated_size = cpu_to_sle64(new_alloc_size);
 	if (NInoSparse(ni) || (ni->type != AT_INDEX_ALLOCATION &&
@@ -7341,11 +7338,11 @@
 		ni->compressed_size += nr_allocated << vol->cluster_size_shift;
 		a->compressed_size = cpu_to_sle64(ni->compressed_size);
 	}
-	lck_spin_unlock(&ni->size_lock);
+	mtx_unlock_spin(&ni->size_lock);
 	if (ni->name == I30 && ni->type == AT_INDEX_ALLOCATION) {
-		lck_spin_lock(&base_ni->size_lock);
+		mtx_lock_spin(&base_ni->size_lock);
 		base_ni->allocated_size = new_alloc_size;
-		lck_spin_unlock(&base_ni->size_lock);
+		mtx_unlock_spin(&base_ni->size_lock);
 	}
 alloc_done:
 	if (new_data_size > le64toh(a->data_size)) {
@@ -7363,14 +7360,14 @@
 			 */ 
 			err = EIO;
 		}
-		lck_spin_lock(&ni->size_lock);
+		mtx_lock_spin(&ni->size_lock);
 		ni->data_size = new_data_size;
 		a->data_size = cpu_to_sle64(new_data_size);
-		lck_spin_unlock(&ni->size_lock);
+		mtx_unlock_spin(&ni->size_lock);
 		if (ni->name == I30 && ni->type == AT_INDEX_ALLOCATION) {
-			lck_spin_lock(&base_ni->size_lock);
+			mtx_lock_spin(&base_ni->size_lock);
 			base_ni->data_size = new_data_size;
-			lck_spin_unlock(&base_ni->size_lock);
+			mtx_unlock_spin(&base_ni->size_lock);
 		}
 	}
 dirty_done:
@@ -7388,7 +7385,7 @@
 done:
 	ntfs_attr_search_ctx_put(actx);
 	ntfs_mft_record_unmap(base_ni);
-	lck_rw_unlock_exclusive(&ni->rl.lock);
+	rw_unlock(&ni->rl.lock);
 	ntfs_debug("Done, new_allocated_size 0x%llx.",
 			(unsigned long long)new_alloc_size);
 	if (dst_alloc_size)
@@ -7515,12 +7512,12 @@
 		}
 	}
 undo_do_trunc:
-	lck_spin_lock(&ni->size_lock);
+	mtx_lock_spin(&ni->size_lock);
 	if (alloc_size == ni->allocated_size) {
-		lck_spin_unlock(&ni->size_lock);
+		mtx_unlock_spin(&ni->size_lock);
 		goto undo_skip_update_sizes;
 	}
-	lck_spin_unlock(&ni->size_lock);
+	mtx_unlock_spin(&ni->size_lock);
 	ntfs_attr_search_ctx_reinit(actx);
 	/* Look up the first attribute extent. */
 	if (ntfs_attr_lookup(ni->type, ni->name, ni->name_len, 0, NULL, 0,
@@ -7533,7 +7530,7 @@
 		goto err_out;
 	}
 	a = actx->a;
-	lck_spin_lock(&ni->size_lock);
+	mtx_lock_spin(&ni->size_lock);
 	ni->allocated_size = alloc_size;
 	a->allocated_size = cpu_to_sle64(alloc_size);
 	if (NInoSparse(ni) || (ni->type != AT_INDEX_ALLOCATION &&
@@ -7542,11 +7539,11 @@
 				vol->cluster_size_shift;
 		a->compressed_size = cpu_to_sle64(ni->compressed_size);
 	}
-	lck_spin_unlock(&ni->size_lock);
+	mtx_unlock_spin(&ni->size_lock);
 	if (ni->name == I30 && ni->type == AT_INDEX_ALLOCATION) {
-		lck_spin_lock(&base_ni->size_lock);
+		mtx_lock_spin(&base_ni->size_lock);
 		base_ni->allocated_size = alloc_size;
-		lck_spin_unlock(&base_ni->size_lock);
+		mtx_unlock_spin(&base_ni->size_lock);
 	}
 	/* Ensure the changes make it to disk. */
 	if (actx->ni != base_ni)
@@ -7564,7 +7561,7 @@
 	ntfs_attr_search_ctx_put(actx);
 	NInoSetMrecNeedsDirtying(base_ni);
 	ntfs_mft_record_unmap(base_ni);
-	lck_rw_unlock_exclusive(&ni->rl.lock);
+	rw_unlock(&ni->rl.lock);
 	/*
 	 * Things are now consistent, try to truncate the attribute back to its
 	 * old size which will cause the allocation to be restored to its old
@@ -7577,9 +7574,9 @@
 	 * the size in the vnode @ni->vn via ubc_setsize().
 	 */
 	if (!is_first) {
-		lck_spin_lock(&ni->size_lock);
+		mtx_lock_spin(&ni->size_lock);
 		ll = ni->data_size;
-		lck_spin_unlock(&ni->size_lock);
+		mtx_unlock_spin(&ni->size_lock);
 		if (ntfs_attr_resize(ni, ll, 0, ictx)) {
 			ntfs_error(vol->mp, "Failed to undo partial "
 					"allocation in inode 0x%llx in error "
@@ -7596,7 +7593,7 @@
 		ntfs_attr_search_ctx_put(actx);
 	if (base_m)
 		ntfs_mft_record_unmap(base_ni);
-	lck_rw_unlock_exclusive(&ni->rl.lock);
+	rw_unlock(&ni->rl.lock);
 	goto conv_err_out;
 trunc_err_out:
 	mp_rebuilt = FALSE;
@@ -7719,7 +7716,7 @@
 	 * Lock the runlist for writing and map the mft record to ensure it is
 	 * safe to modify the attribute runlist and sizes.
 	 */
-	lck_rw_lock_exclusive(&ni->rl.lock);
+	rw_wlock(&ni->rl.lock);
 	err = ntfs_mft_record_map(base_ni, &m);
 	if (err) {
 		ntfs_error(vol->mp, "Failed to map mft record for mft_no "
@@ -7765,10 +7762,10 @@
 	else
 		new_alloc_size = (new_size + 7) & ~7;
 	/* The current allocated size is the old allocated size. */
-	lck_spin_lock(&ni->size_lock);
+	mtx_lock_spin(&ni->size_lock);
 	old_alloc_size = ni->allocated_size;
 	compressed_size = ni->compressed_size;
-	lck_spin_unlock(&ni->size_lock);
+	mtx_unlock_spin(&ni->size_lock);
 	/*
 	 * The change in the file size.  This will be 0 if no change, >0 if the
 	 * size is growing, and <0 if the size is shrinking.
@@ -7857,7 +7854,7 @@
 			!ntfs_resident_attr_value_resize(m, a, new_size)) {
 		/* The resize succeeded! */
 		NInoSetMrecNeedsDirtying(actx->ni);
-		lck_spin_lock(&ni->size_lock);
+		mtx_lock_spin(&ni->size_lock);
 		/* Update the sizes in the ntfs inode and all is done. */
 		ni->allocated_size = le32toh(a->length) -
 				le16toh(a->value_offset);
@@ -7889,7 +7886,7 @@
 		 * deadlocks.
 		 */
 		ni->initialized_size = new_size;
-		lck_spin_unlock(&ni->size_lock);
+		mtx_unlock_spin(&ni->size_lock);
 		goto unm_done;
 	}
 	/* If the above resize failed, this must be an attribute extension. */
@@ -7976,7 +7973,7 @@
 			panic("%s(): ictx->is_locked\n", __FUNCTION__);
 		if (ictx->is_root)
 			panic("%s(): ictx->is_root\n", __FUNCTION__);
-		lck_rw_unlock_exclusive(&ni->rl.lock);
+		rw_unlock(&ni->rl.lock);
 		/* Find the index root by walking up the tree path. */
 		root_ictx = ictx;
 		while (!root_ictx->is_root) {
@@ -8093,7 +8090,7 @@
 	 */
 	ntfs_attr_search_ctx_put(actx);
 	ntfs_mft_record_unmap(base_ni);
-	lck_rw_unlock_exclusive(&ni->rl.lock);
+	rw_unlock(&ni->rl.lock);
 	/*
 	 * Not enough space in the mft record, try to make the attribute
 	 * non-resident and if successful restart the truncation process.
@@ -8186,19 +8183,19 @@
 		 * Make the valid size smaller (the UBC size is already
 		 * up-to-date).
 		 */
-		lck_spin_lock(&ni->size_lock);
+		mtx_lock_spin(&ni->size_lock);
 		if (new_size < ni->initialized_size) {
 			ni->initialized_size = new_size;
 			a->initialized_size = cpu_to_sle64(new_size);
-			lck_spin_unlock(&ni->size_lock);
+			mtx_unlock_spin(&ni->size_lock);
 			if (ni->name == I30 &&
 					ni->type == AT_INDEX_ALLOCATION) {
-				lck_spin_lock(&base_ni->size_lock);
+				mtx_lock_spin(&base_ni->size_lock);
 				base_ni->initialized_size = new_size;
-				lck_spin_unlock(&base_ni->size_lock);
+				mtx_unlock_spin(&base_ni->size_lock);
 			}
 		} else
-			lck_spin_unlock(&ni->size_lock);
+			mtx_unlock_spin(&ni->size_lock);
 		/*
 		 * If the size is shrinking it makes no sense for the
 		 * allocation to be growing.
@@ -8233,7 +8230,7 @@
 		 */
 		ntfs_attr_search_ctx_put(actx);
 		ntfs_mft_record_unmap(base_ni);
-		lck_rw_unlock_exclusive(&ni->rl.lock);
+		rw_unlock(&ni->rl.lock);
 		err = ntfs_attr_extend_allocation(ni, new_size,
 				size_change > 0 ? new_size : -1, -1, ictx,
 				NULL, FALSE);
@@ -8244,14 +8241,14 @@
 	/* alloc_change <= 0 */
 	/* If the actual size is changing need to update it now. */
 	if (size_change) {
-		lck_spin_lock(&ni->size_lock);
+		mtx_lock_spin(&ni->size_lock);
 		ni->data_size = new_size;
 		a->data_size = cpu_to_sle64(new_size);
-		lck_spin_unlock(&ni->size_lock);
+		mtx_unlock_spin(&ni->size_lock);
 		if (ni->name == I30 && ni->type == AT_INDEX_ALLOCATION) {
-			lck_spin_lock(&base_ni->size_lock);
+			mtx_lock_spin(&base_ni->size_lock);
 			base_ni->data_size = new_size;
-			lck_spin_unlock(&base_ni->size_lock);
+			mtx_unlock_spin(&base_ni->size_lock);
 		}
 	}
 	/* Ensure the modified mft record is written out. */
@@ -8334,7 +8331,7 @@
 		ntfs_attr_sparse_clear(base_ni, ni, actx);
 	}
 	/* Update the allocated/compressed size. */
-	lck_spin_lock(&ni->size_lock);
+	mtx_lock_spin(&ni->size_lock);
 	ni->allocated_size = new_alloc_size;
 	a->allocated_size = cpu_to_sle64(new_alloc_size);
 	if (NInoSparse(ni) || (ni->type != AT_INDEX_ALLOCATION &&
@@ -8347,11 +8344,11 @@
 			a->compressed_size = cpu_to_sle64(ni->compressed_size);
 		}
 	}
-	lck_spin_unlock(&ni->size_lock);
+	mtx_unlock_spin(&ni->size_lock);
 	if (ni->name == I30 && ni->type == AT_INDEX_ALLOCATION) {
-		lck_spin_lock(&base_ni->size_lock);
+		mtx_lock_spin(&base_ni->size_lock);
 		base_ni->allocated_size = new_alloc_size;
-		lck_spin_unlock(&base_ni->size_lock);
+		mtx_unlock_spin(&base_ni->size_lock);
 	}
 	/*
 	 * We have the base attribute extent in @actx and we have set it up
@@ -8689,7 +8686,7 @@
 unm_done:
 	ntfs_attr_search_ctx_put(actx);
 	ntfs_mft_record_unmap(base_ni);
-	lck_rw_unlock_exclusive(&ni->rl.lock);
+	rw_unlock(&ni->rl.lock);
 	/* Set the UBC size if not set yet. */
 	if (need_ubc_setsize && !ubc_setsize(ni->vn, new_size)) {
 		ntfs_error(vol->mp, "Failed to set the size in UBC.");
@@ -8771,7 +8768,7 @@
 unm_err:
 	ntfs_mft_record_unmap(base_ni);
 unl_err:
-	lck_rw_unlock_exclusive(&ni->rl.lock);
+	rw_unlock(&ni->rl.lock);
 err:
 	/* Reset the UBC size. */
 	if (!ubc_setsize(ni->vn, old_size))
@@ -8840,9 +8837,9 @@
 	end = ofs + cnt;
 	end_ofs = (unsigned)end & PAGE_MASK;
 	/* If the end is outside the inode size return ESPIPE. */
-	lck_spin_lock(&ni->size_lock);
+	mtx_lock_spin(&ni->size_lock);
 	data_size = ni->data_size;
-	lck_spin_unlock(&ni->size_lock);
+	mtx_unlock_spin(&ni->size_lock);
 	if (end > data_size) {
 		ntfs_error(vol->mp, "Request exceeds end of attribute.");
 		return ESPIPE;
@@ -8971,7 +8968,7 @@
 		goto put_err;
 	}
 	a = ctx->a;
-	lck_spin_lock(&ni->size_lock);
+	mtx_lock_spin(&ni->size_lock);
 	/* These can happen when we race with a shrinking truncate. */
 	attr_len = le32toh(a->value_length);
 	if (attr_len > ni->data_size)
@@ -8982,7 +8979,7 @@
 	init_len = attr_len;
 	if (init_len > ni->initialized_size)

*** DIFF OUTPUT TRUNCATED AT 1000 LINES ***

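The same revision also replaces OSMalloc()/OSFree() with malloc()/free() in
the ntfs_attr_search_ctx_get()/ntfs_attr_search_ctx_put() hunks of
ntfs_attr.c above.  A minimal sketch of the usual kernel idiom follows,
since FreeBSD's malloc(9) and free(9) take a malloc type and malloc(9)
additionally takes flags; M_NTFS_SKETCH and struct example_ctx are
hypothetical names, not identifiers from the tree:

/* Sketch only; not part of r237218. */
#include <sys/param.h>
#include <sys/kernel.h>
#include <sys/malloc.h>

/* Hypothetical malloc type for the driver's allocations. */
static MALLOC_DEFINE(M_NTFS_SKETCH, "ntfs_sketch", "NTFS driver allocations");

struct example_ctx {
	int	placeholder;	/* stands in for the search context fields */
};

static struct example_ctx *
example_ctx_get(void)
{
	/* M_WAITOK sleeps for memory instead of returning NULL. */
	return (malloc(sizeof(struct example_ctx), M_NTFS_SKETCH,
	    M_WAITOK | M_ZERO));
}

static void
example_ctx_put(struct example_ctx *ctx)
{
	free(ctx, M_NTFS_SKETCH);
}

With M_WAITOK the NULL check in ntfs_attr_search_ctx_get() becomes
unnecessary; M_NOWAIT would preserve the original semantics of a possible
NULL return.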

