Date: Wed, 1 Aug 2018 03:03:15 +0000 (UTC)
From: Alexander Motin <mav@FreeBSD.org>
To: src-committers@freebsd.org, svn-src-all@freebsd.org, svn-src-vendor@freebsd.org
Subject: svn commit: r337027 - vendor-sys/illumos/dist/uts/common/fs/zfs
Message-ID: <201808010303.w7133FfN037368@repo.freebsd.org>
Author: mav Date: Wed Aug 1 03:03:15 2018 New Revision: 337027 URL: https://svnweb.freebsd.org/changeset/base/337027 Log: 9328 zap code can take advantage of c99 9329 panic in zap_leaf_lookup() due to concurrent zapification illumos/illumos-gate@bf26014c5541b6119f34e0d95294b7f2eb105ac2 Reviewed by: Steve Gonczi <steve.gonczi@delphix.com> Reviewed by: George Wilson <george.wilson@delphix.com> Reviewed by: Pavel Zakharov <pavel.zakharov@delphix.com> Reviewed by: Brad Lewis <brad.lewis@delphix.com> Approved by: Dan McDonald <danmcd@joyent.com> Author: Matthew Ahrens <mahrens@delphix.com> Modified: vendor-sys/illumos/dist/uts/common/fs/zfs/dmu_object.c vendor-sys/illumos/dist/uts/common/fs/zfs/zap.c vendor-sys/illumos/dist/uts/common/fs/zfs/zap_leaf.c vendor-sys/illumos/dist/uts/common/fs/zfs/zap_micro.c Modified: vendor-sys/illumos/dist/uts/common/fs/zfs/dmu_object.c ============================================================================== --- vendor-sys/illumos/dist/uts/common/fs/zfs/dmu_object.c Wed Aug 1 02:59:56 2018 (r337026) +++ vendor-sys/illumos/dist/uts/common/fs/zfs/dmu_object.c Wed Aug 1 03:03:15 2018 (r337027) @@ -20,7 +20,7 @@ */ /* * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved. - * Copyright (c) 2013, 2015 by Delphix. All rights reserved. + * Copyright (c) 2013, 2017 by Delphix. All rights reserved. * Copyright 2014 HybridCluster. All rights reserved. */ @@ -204,12 +204,18 @@ dmu_object_zapify(objset_t *mos, uint64_t object, dmu_ } ASSERT3U(dn->dn_type, ==, old_type); ASSERT0(dn->dn_maxblkid); + + /* + * We must initialize the ZAP data before changing the type, + * so that concurrent calls to *_is_zapified() can determine if + * the object has been completely zapified by checking the type. + */ + mzap_create_impl(mos, object, 0, 0, tx); + dn->dn_next_type[tx->tx_txg & TXG_MASK] = dn->dn_type = DMU_OTN_ZAP_METADATA; dnode_setdirty(dn, tx); dnode_rele(dn, FTAG); - - mzap_create_impl(mos, object, 0, 0, tx); spa_feature_incr(dmu_objset_spa(mos), SPA_FEATURE_EXTENSIBLE_DATASET, tx); Modified: vendor-sys/illumos/dist/uts/common/fs/zfs/zap.c ============================================================================== --- vendor-sys/illumos/dist/uts/common/fs/zfs/zap.c Wed Aug 1 02:59:56 2018 (r337026) +++ vendor-sys/illumos/dist/uts/common/fs/zfs/zap.c Wed Aug 1 03:03:15 2018 (r337027) @@ -58,10 +58,8 @@ static uint64_t zap_allocate_blocks(zap_t *zap, int nb void fzap_byteswap(void *vbuf, size_t size) { - uint64_t block_type; + uint64_t block_type = *(uint64_t *)vbuf; - block_type = *(uint64_t *)vbuf; - if (block_type == ZBT_LEAF || block_type == BSWAP_64(ZBT_LEAF)) zap_leaf_byteswap(vbuf, size); else { @@ -73,11 +71,6 @@ fzap_byteswap(void *vbuf, size_t size) void fzap_upgrade(zap_t *zap, dmu_tx_t *tx, zap_flags_t flags) { - dmu_buf_t *db; - zap_leaf_t *l; - int i; - zap_phys_t *zp; - ASSERT(RW_WRITE_HELD(&zap->zap_rwlock)); zap->zap_ismicro = FALSE; @@ -87,7 +80,7 @@ fzap_upgrade(zap_t *zap, dmu_tx_t *tx, zap_flags_t fla mutex_init(&zap->zap_f.zap_num_entries_mtx, 0, 0, 0); zap->zap_f.zap_block_shift = highbit64(zap->zap_dbuf->db_size) - 1; - zp = zap_f_phys(zap); + zap_phys_t *zp = zap_f_phys(zap); /* * explicitly zero it since it might be coming from an * initialized microzap @@ -106,17 +99,18 @@ fzap_upgrade(zap_t *zap, dmu_tx_t *tx, zap_flags_t fla zp->zap_flags = flags; /* block 1 will be the first leaf */ - for (i = 0; i < (1<<zp->zap_ptrtbl.zt_shift); i++) + for (int i = 0; i < (1<<zp->zap_ptrtbl.zt_shift); i++) 
ZAP_EMBEDDED_PTRTBL_ENT(zap, i) = 1; /* * set up block 1 - the first leaf */ - VERIFY(0 == dmu_buf_hold(zap->zap_objset, zap->zap_object, + dmu_buf_t *db; + VERIFY0(dmu_buf_hold(zap->zap_objset, zap->zap_object, 1<<FZAP_BLOCK_SHIFT(zap), FTAG, &db, DMU_READ_NO_PREFETCH)); dmu_buf_will_dirty(db, tx); - l = kmem_zalloc(sizeof (zap_leaf_t), KM_SLEEP); + zap_leaf_t *l = kmem_zalloc(sizeof (zap_leaf_t), KM_SLEEP); l->l_dbuf = db; zap_leaf_init(l, zp->zap_normflags != 0); @@ -146,9 +140,7 @@ zap_table_grow(zap_t *zap, zap_table_phys_t *tbl, void (*transfer_func)(const uint64_t *src, uint64_t *dst, int n), dmu_tx_t *tx) { - uint64_t b, newblk; - dmu_buf_t *db_old, *db_new; - int err; + uint64_t newblk; int bs = FZAP_BLOCK_SHIFT(zap); int hepb = 1<<(bs-4); /* hepb = half the number of entries in a block */ @@ -172,21 +164,23 @@ zap_table_grow(zap_t *zap, zap_table_phys_t *tbl, * Copy the ptrtbl from the old to new location. */ - b = tbl->zt_blks_copied; - err = dmu_buf_hold(zap->zap_objset, zap->zap_object, + uint64_t b = tbl->zt_blks_copied; + dmu_buf_t *db_old; + int err = dmu_buf_hold(zap->zap_objset, zap->zap_object, (tbl->zt_blk + b) << bs, FTAG, &db_old, DMU_READ_NO_PREFETCH); - if (err) + if (err != 0) return (err); /* first half of entries in old[b] go to new[2*b+0] */ - VERIFY(0 == dmu_buf_hold(zap->zap_objset, zap->zap_object, + dmu_buf_t *db_new; + VERIFY0(dmu_buf_hold(zap->zap_objset, zap->zap_object, (newblk + 2*b+0) << bs, FTAG, &db_new, DMU_READ_NO_PREFETCH)); dmu_buf_will_dirty(db_new, tx); transfer_func(db_old->db_data, db_new->db_data, hepb); dmu_buf_rele(db_new, FTAG); /* second half of entries in old[b] go to new[2*b+1] */ - VERIFY(0 == dmu_buf_hold(zap->zap_objset, zap->zap_object, + VERIFY0(dmu_buf_hold(zap->zap_objset, zap->zap_object, (newblk + 2*b+1) << bs, FTAG, &db_new, DMU_READ_NO_PREFETCH)); dmu_buf_will_dirty(db_new, tx); transfer_func((uint64_t *)db_old->db_data + hepb, @@ -221,22 +215,20 @@ static int zap_table_store(zap_t *zap, zap_table_phys_t *tbl, uint64_t idx, uint64_t val, dmu_tx_t *tx) { - int err; - uint64_t blk, off; int bs = FZAP_BLOCK_SHIFT(zap); - dmu_buf_t *db; ASSERT(RW_LOCK_HELD(&zap->zap_rwlock)); ASSERT(tbl->zt_blk != 0); dprintf("storing %llx at index %llx\n", val, idx); - blk = idx >> (bs-3); - off = idx & ((1<<(bs-3))-1); + uint64_t blk = idx >> (bs-3); + uint64_t off = idx & ((1<<(bs-3))-1); - err = dmu_buf_hold(zap->zap_objset, zap->zap_object, + dmu_buf_t *db; + int err = dmu_buf_hold(zap->zap_objset, zap->zap_object, (tbl->zt_blk + blk) << bs, FTAG, &db, DMU_READ_NO_PREFETCH); - if (err) + if (err != 0) return (err); dmu_buf_will_dirty(db, tx); @@ -249,7 +241,7 @@ zap_table_store(zap_t *zap, zap_table_phys_t *tbl, uin err = dmu_buf_hold(zap->zap_objset, zap->zap_object, (tbl->zt_nextblk + blk2) << bs, FTAG, &db2, DMU_READ_NO_PREFETCH); - if (err) { + if (err != 0) { dmu_buf_rele(db, FTAG); return (err); } @@ -268,27 +260,24 @@ zap_table_store(zap_t *zap, zap_table_phys_t *tbl, uin static int zap_table_load(zap_t *zap, zap_table_phys_t *tbl, uint64_t idx, uint64_t *valp) { - uint64_t blk, off; - int err; - dmu_buf_t *db; - dnode_t *dn; int bs = FZAP_BLOCK_SHIFT(zap); ASSERT(RW_LOCK_HELD(&zap->zap_rwlock)); - blk = idx >> (bs-3); - off = idx & ((1<<(bs-3))-1); + uint64_t blk = idx >> (bs-3); + uint64_t off = idx & ((1<<(bs-3))-1); /* * Note: this is equivalent to dmu_buf_hold(), but we use * _dnode_enter / _by_dnode because it's faster because we don't * have to hold the dnode. 
*/ - dn = dmu_buf_dnode_enter(zap->zap_dbuf); - err = dmu_buf_hold_by_dnode(dn, + dnode_t *dn = dmu_buf_dnode_enter(zap->zap_dbuf); + dmu_buf_t *db; + int err = dmu_buf_hold_by_dnode(dn, (tbl->zt_blk + blk) << bs, FTAG, &db, DMU_READ_NO_PREFETCH); dmu_buf_dnode_exit(zap->zap_dbuf); - if (err) + if (err != 0) return (err); *valp = ((uint64_t *)db->db_data)[off]; dmu_buf_rele(db, FTAG); @@ -319,11 +308,10 @@ zap_table_load(zap_t *zap, zap_table_phys_t *tbl, uint static void zap_ptrtbl_transfer(const uint64_t *src, uint64_t *dst, int n) { - int i; - for (i = 0; i < n; i++) { + for (int i = 0; i < n; i++) { uint64_t lb = src[i]; - dst[2*i+0] = lb; - dst[2*i+1] = lb; + dst[2 * i + 0] = lb; + dst[2 * i + 1] = lb; } } @@ -345,19 +333,16 @@ zap_grow_ptrtbl(zap_t *zap, dmu_tx_t *tx) * stored in the header block). Give it its own entire * block, which will double the size of the ptrtbl. */ - uint64_t newblk; - dmu_buf_t *db_new; - int err; - ASSERT3U(zap_f_phys(zap)->zap_ptrtbl.zt_shift, ==, ZAP_EMBEDDED_PTRTBL_SHIFT(zap)); ASSERT0(zap_f_phys(zap)->zap_ptrtbl.zt_blk); - newblk = zap_allocate_blocks(zap, 1); - err = dmu_buf_hold(zap->zap_objset, zap->zap_object, + uint64_t newblk = zap_allocate_blocks(zap, 1); + dmu_buf_t *db_new; + int err = dmu_buf_hold(zap->zap_objset, zap->zap_object, newblk << FZAP_BLOCK_SHIFT(zap), FTAG, &db_new, DMU_READ_NO_PREFETCH); - if (err) + if (err != 0) return (err); dmu_buf_will_dirty(db_new, tx); zap_ptrtbl_transfer(&ZAP_EMBEDDED_PTRTBL_ENT(zap, 0), @@ -392,9 +377,8 @@ zap_increment_num_entries(zap_t *zap, int delta, dmu_t static uint64_t zap_allocate_blocks(zap_t *zap, int nblocks) { - uint64_t newblk; ASSERT(RW_WRITE_HELD(&zap->zap_rwlock)); - newblk = zap_f_phys(zap)->zap_freeblk; + uint64_t newblk = zap_f_phys(zap)->zap_freeblk; zap_f_phys(zap)->zap_freeblk += nblocks; return (newblk); } @@ -411,7 +395,6 @@ zap_leaf_evict_sync(void *dbu) static zap_leaf_t * zap_create_leaf(zap_t *zap, dmu_tx_t *tx) { - void *winner; zap_leaf_t *l = kmem_zalloc(sizeof (zap_leaf_t), KM_SLEEP); ASSERT(RW_WRITE_HELD(&zap->zap_rwlock)); @@ -421,12 +404,11 @@ zap_create_leaf(zap_t *zap, dmu_tx_t *tx) l->l_blkid = zap_allocate_blocks(zap, 1); l->l_dbuf = NULL; - VERIFY(0 == dmu_buf_hold(zap->zap_objset, zap->zap_object, + VERIFY0(dmu_buf_hold(zap->zap_objset, zap->zap_object, l->l_blkid << FZAP_BLOCK_SHIFT(zap), NULL, &l->l_dbuf, DMU_READ_NO_PREFETCH)); dmu_buf_init_user(&l->l_dbu, zap_leaf_evict_sync, NULL, &l->l_dbuf); - winner = dmu_buf_set_user(l->l_dbuf, &l->l_dbu); - ASSERT(winner == NULL); + VERIFY3P(NULL, ==, dmu_buf_set_user(l->l_dbuf, &l->l_dbu)); dmu_buf_will_dirty(l->l_dbuf, tx); zap_leaf_init(l, zap->zap_normflags != 0); @@ -460,11 +442,9 @@ zap_put_leaf(zap_leaf_t *l) static zap_leaf_t * zap_open_leaf(uint64_t blkid, dmu_buf_t *db) { - zap_leaf_t *l, *winner; - ASSERT(blkid != 0); - l = kmem_zalloc(sizeof (zap_leaf_t), KM_SLEEP); + zap_leaf_t *l = kmem_zalloc(sizeof (zap_leaf_t), KM_SLEEP); rw_init(&l->l_rwlock, 0, 0, 0); rw_enter(&l->l_rwlock, RW_WRITER); l->l_blkid = blkid; @@ -472,7 +452,7 @@ zap_open_leaf(uint64_t blkid, dmu_buf_t *db) l->l_dbuf = db; dmu_buf_init_user(&l->l_dbu, zap_leaf_evict_sync, NULL, &l->l_dbuf); - winner = dmu_buf_set_user(db, &l->l_dbu); + zap_leaf_t *winner = dmu_buf_set_user(db, &l->l_dbu); rw_exit(&l->l_rwlock); if (winner != NULL) { @@ -510,17 +490,15 @@ zap_get_leaf_byblk(zap_t *zap, uint64_t blkid, dmu_tx_ zap_leaf_t **lp) { dmu_buf_t *db; - zap_leaf_t *l; - int bs = FZAP_BLOCK_SHIFT(zap); - int err; 
ASSERT(RW_LOCK_HELD(&zap->zap_rwlock)); + int bs = FZAP_BLOCK_SHIFT(zap); dnode_t *dn = dmu_buf_dnode_enter(zap->zap_dbuf); - err = dmu_buf_hold_by_dnode(dn, + int err = dmu_buf_hold_by_dnode(dn, blkid << bs, NULL, &db, DMU_READ_NO_PREFETCH); dmu_buf_dnode_exit(zap->zap_dbuf); - if (err) + if (err != 0) return (err); ASSERT3U(db->db_object, ==, zap->zap_object); @@ -528,7 +506,7 @@ zap_get_leaf_byblk(zap_t *zap, uint64_t blkid, dmu_tx_ ASSERT3U(db->db_size, ==, 1 << bs); ASSERT(blkid != 0); - l = dmu_buf_get_user(db); + zap_leaf_t *l = dmu_buf_get_user(db); if (l == NULL) l = zap_open_leaf(blkid, db); @@ -583,8 +561,7 @@ zap_set_idx_to_blk(zap_t *zap, uint64_t idx, uint64_t static int zap_deref_leaf(zap_t *zap, uint64_t h, dmu_tx_t *tx, krw_t lt, zap_leaf_t **lp) { - uint64_t idx, blk; - int err; + uint64_t blk; ASSERT(zap->zap_dbuf == NULL || zap_f_phys(zap) == zap->zap_dbuf->db_data); @@ -596,8 +573,8 @@ zap_deref_leaf(zap_t *zap, uint64_t h, dmu_tx_t *tx, k return (SET_ERROR(EIO)); } - idx = ZAP_HASH_IDX(h, zap_f_phys(zap)->zap_ptrtbl.zt_shift); - err = zap_idx_to_blk(zap, idx, &blk); + uint64_t idx = ZAP_HASH_IDX(h, zap_f_phys(zap)->zap_ptrtbl.zt_shift); + int err = zap_idx_to_blk(zap, idx, &blk); if (err != 0) return (err); err = zap_get_leaf_byblk(zap, blk, tx, lt, lp); @@ -614,9 +591,7 @@ zap_expand_leaf(zap_name_t *zn, zap_leaf_t *l, { zap_t *zap = zn->zn_zap; uint64_t hash = zn->zn_hash; - zap_leaf_t *nl; - int prefix_diff, i, err; - uint64_t sibling; + int err; int old_prefix_len = zap_leaf_phys(l)->l_hdr.lh_prefix_len; ASSERT3U(old_prefix_len, <=, zap_f_phys(zap)->zap_ptrtbl.zt_shift); @@ -636,19 +611,19 @@ zap_expand_leaf(zap_name_t *zn, zap_leaf_t *l, err = zap_lockdir(os, object, tx, RW_WRITER, FALSE, FALSE, tag, &zn->zn_zap); zap = zn->zn_zap; - if (err) + if (err != 0) return (err); ASSERT(!zap->zap_ismicro); while (old_prefix_len == zap_f_phys(zap)->zap_ptrtbl.zt_shift) { err = zap_grow_ptrtbl(zap, tx); - if (err) + if (err != 0) return (err); } err = zap_deref_leaf(zap, hash, tx, RW_WRITER, &l); - if (err) + if (err != 0) return (err); if (zap_leaf_phys(l)->l_hdr.lh_prefix_len != old_prefix_len) { @@ -662,25 +637,26 @@ zap_expand_leaf(zap_name_t *zn, zap_leaf_t *l, ASSERT3U(ZAP_HASH_IDX(hash, old_prefix_len), ==, zap_leaf_phys(l)->l_hdr.lh_prefix); - prefix_diff = zap_f_phys(zap)->zap_ptrtbl.zt_shift - + int prefix_diff = zap_f_phys(zap)->zap_ptrtbl.zt_shift - (old_prefix_len + 1); - sibling = (ZAP_HASH_IDX(hash, old_prefix_len + 1) | 1) << prefix_diff; + uint64_t sibling = + (ZAP_HASH_IDX(hash, old_prefix_len + 1) | 1) << prefix_diff; /* check for i/o errors before doing zap_leaf_split */ - for (i = 0; i < (1ULL<<prefix_diff); i++) { + for (int i = 0; i < (1ULL << prefix_diff); i++) { uint64_t blk; - err = zap_idx_to_blk(zap, sibling+i, &blk); - if (err) + err = zap_idx_to_blk(zap, sibling + i, &blk); + if (err != 0) return (err); ASSERT3U(blk, ==, l->l_blkid); } - nl = zap_create_leaf(zap, tx); + zap_leaf_t *nl = zap_create_leaf(zap, tx); zap_leaf_split(l, nl, zap->zap_normflags != 0); /* set sibling pointers */ - for (i = 0; i < (1ULL << prefix_diff); i++) { - err = zap_set_idx_to_blk(zap, sibling+i, nl->l_blkid, tx); + for (int i = 0; i < (1ULL << prefix_diff); i++) { + err = zap_set_idx_to_blk(zap, sibling + i, nl->l_blkid, tx); ASSERT0(err); /* we checked for i/o errors above */ } @@ -708,8 +684,6 @@ zap_put_leaf_maybe_grow_ptrtbl(zap_name_t *zn, zap_lea zap_put_leaf(l); if (leaffull || zap_f_phys(zap)->zap_ptrtbl.zt_nextblk) { - int err; - /* * We are in the middle 
of growing the pointer table, or * this leaf will soon make us grow it. @@ -719,10 +693,10 @@ zap_put_leaf_maybe_grow_ptrtbl(zap_name_t *zn, zap_lea uint64_t zapobj = zap->zap_object; zap_unlockdir(zap, tag); - err = zap_lockdir(os, zapobj, tx, + int err = zap_lockdir(os, zapobj, tx, RW_WRITER, FALSE, FALSE, tag, &zn->zn_zap); zap = zn->zn_zap; - if (err) + if (err != 0) return; } @@ -763,9 +737,8 @@ fzap_checksize(uint64_t integer_size, uint64_t num_int static int fzap_check(zap_name_t *zn, uint64_t integer_size, uint64_t num_integers) { - int err; - - if ((err = fzap_checkname(zn)) != 0) + int err = fzap_checkname(zn); + if (err != 0) return (err); return (fzap_checksize(integer_size, num_integers)); } @@ -779,10 +752,10 @@ fzap_lookup(zap_name_t *zn, char *realname, int rn_len, boolean_t *ncp) { zap_leaf_t *l; - int err; zap_entry_handle_t zeh; - if ((err = fzap_checkname(zn)) != 0) + int err = fzap_checkname(zn); + if (err != 0) return (err); err = zap_deref_leaf(zn->zn_zap, zn->zn_hash, NULL, RW_READER, &l); @@ -870,7 +843,8 @@ fzap_update(zap_name_t *zn, void *tag, dmu_tx_t *tx) { zap_leaf_t *l; - int err, create; + int err; + boolean_t create; zap_entry_handle_t zeh; zap_t *zap = zn->zn_zap; @@ -923,9 +897,9 @@ fzap_length(zap_name_t *zn, if (err != 0) goto out; - if (integer_size) + if (integer_size != 0) *integer_size = zeh.zeh_integer_size; - if (num_integers) + if (num_integers != 0) *num_integers = zeh.zeh_num_integers; out: zap_put_leaf(l); @@ -954,15 +928,14 @@ fzap_remove(zap_name_t *zn, dmu_tx_t *tx) void fzap_prefetch(zap_name_t *zn) { - uint64_t idx, blk; + uint64_t blk; zap_t *zap = zn->zn_zap; - int bs; - idx = ZAP_HASH_IDX(zn->zn_hash, + uint64_t idx = ZAP_HASH_IDX(zn->zn_hash, zap_f_phys(zap)->zap_ptrtbl.zt_shift); if (zap_idx_to_blk(zap, idx, &blk) != 0) return; - bs = FZAP_BLOCK_SHIFT(zap); + int bs = FZAP_BLOCK_SHIFT(zap); dmu_prefetch(zap->zap_objset, zap->zap_object, 0, blk << bs, 1 << bs, ZIO_PRIORITY_SYNC_READ); } @@ -975,9 +948,8 @@ uint64_t zap_create_link(objset_t *os, dmu_object_type_t ot, uint64_t parent_obj, const char *name, dmu_tx_t *tx) { - uint64_t new_obj; - - VERIFY((new_obj = zap_create(os, ot, DMU_OT_NONE, 0, tx)) > 0); + uint64_t new_obj = zap_create(os, ot, DMU_OT_NONE, 0, tx); + VERIFY(new_obj != 0); VERIFY0(zap_add(os, parent_obj, name, sizeof (uint64_t), 1, &new_obj, tx)); @@ -989,13 +961,12 @@ zap_value_search(objset_t *os, uint64_t zapobj, uint64 char *name) { zap_cursor_t zc; - zap_attribute_t *za; int err; if (mask == 0) mask = -1ULL; - za = kmem_alloc(sizeof (zap_attribute_t), KM_SLEEP); + zap_attribute_t *za = kmem_alloc(sizeof (*za), KM_SLEEP); for (zap_cursor_init(&zc, os, zapobj); (err = zap_cursor_retrieve(&zc, za)) == 0; zap_cursor_advance(&zc)) { @@ -1005,7 +976,7 @@ zap_value_search(objset_t *os, uint64_t zapobj, uint64 } } zap_cursor_fini(&zc); - kmem_free(za, sizeof (zap_attribute_t)); + kmem_free(za, sizeof (*za)); return (err); } @@ -1013,23 +984,23 @@ int zap_join(objset_t *os, uint64_t fromobj, uint64_t intoobj, dmu_tx_t *tx) { zap_cursor_t zc; - zap_attribute_t za; - int err; + int err = 0; - err = 0; + zap_attribute_t *za = kmem_alloc(sizeof (*za), KM_SLEEP); for (zap_cursor_init(&zc, os, fromobj); - zap_cursor_retrieve(&zc, &za) == 0; + zap_cursor_retrieve(&zc, za) == 0; (void) zap_cursor_advance(&zc)) { - if (za.za_integer_length != 8 || za.za_num_integers != 1) { + if (za->za_integer_length != 8 || za->za_num_integers != 1) { err = SET_ERROR(EINVAL); break; } - err = zap_add(os, intoobj, za.za_name, - 8, 1, 
&za.za_first_integer, tx); - if (err) + err = zap_add(os, intoobj, za->za_name, + 8, 1, &za->za_first_integer, tx); + if (err != 0) break; } zap_cursor_fini(&zc); + kmem_free(za, sizeof (*za)); return (err); } @@ -1038,23 +1009,23 @@ zap_join_key(objset_t *os, uint64_t fromobj, uint64_t uint64_t value, dmu_tx_t *tx) { zap_cursor_t zc; - zap_attribute_t za; - int err; + int err = 0; - err = 0; + zap_attribute_t *za = kmem_alloc(sizeof (*za), KM_SLEEP); for (zap_cursor_init(&zc, os, fromobj); - zap_cursor_retrieve(&zc, &za) == 0; + zap_cursor_retrieve(&zc, za) == 0; (void) zap_cursor_advance(&zc)) { - if (za.za_integer_length != 8 || za.za_num_integers != 1) { + if (za->za_integer_length != 8 || za->za_num_integers != 1) { err = SET_ERROR(EINVAL); break; } - err = zap_add(os, intoobj, za.za_name, + err = zap_add(os, intoobj, za->za_name, 8, 1, &value, tx); - if (err) + if (err != 0) break; } zap_cursor_fini(&zc); + kmem_free(za, sizeof (*za)); return (err); } @@ -1063,29 +1034,29 @@ zap_join_increment(objset_t *os, uint64_t fromobj, uin dmu_tx_t *tx) { zap_cursor_t zc; - zap_attribute_t za; - int err; + int err = 0; - err = 0; + zap_attribute_t *za = kmem_alloc(sizeof (*za), KM_SLEEP); for (zap_cursor_init(&zc, os, fromobj); - zap_cursor_retrieve(&zc, &za) == 0; + zap_cursor_retrieve(&zc, za) == 0; (void) zap_cursor_advance(&zc)) { uint64_t delta = 0; - if (za.za_integer_length != 8 || za.za_num_integers != 1) { + if (za->za_integer_length != 8 || za->za_num_integers != 1) { err = SET_ERROR(EINVAL); break; } - err = zap_lookup(os, intoobj, za.za_name, 8, 1, &delta); + err = zap_lookup(os, intoobj, za->za_name, 8, 1, &delta); if (err != 0 && err != ENOENT) break; - delta += za.za_first_integer; - err = zap_update(os, intoobj, za.za_name, 8, 1, &delta, tx); - if (err) + delta += za->za_first_integer; + err = zap_update(os, intoobj, za->za_name, 8, 1, &delta, tx); + if (err != 0) break; } zap_cursor_fini(&zc); + kmem_free(za, sizeof (*za)); return (err); } @@ -1150,12 +1121,11 @@ zap_increment(objset_t *os, uint64_t obj, const char * dmu_tx_t *tx) { uint64_t value = 0; - int err; if (delta == 0) return (0); - err = zap_lookup(os, obj, name, 8, 1, &value); + int err = zap_lookup(os, obj, name, 8, 1, &value); if (err != 0 && err != ENOENT) return (err); value += delta; @@ -1253,7 +1223,6 @@ again: static void zap_stats_ptrtbl(zap_t *zap, uint64_t *tbl, int len, zap_stats_t *zs) { - int i, err; uint64_t lastblk = 0; /* @@ -1261,14 +1230,14 @@ zap_stats_ptrtbl(zap_t *zap, uint64_t *tbl, int len, z * can hold, then it'll be accounted for more than once, since * we won't have lastblk. 
*/ - for (i = 0; i < len; i++) { + for (int i = 0; i < len; i++) { zap_leaf_t *l; if (tbl[i] == lastblk) continue; lastblk = tbl[i]; - err = zap_get_leaf_byblk(zap, tbl[i], NULL, RW_READER, &l); + int err = zap_get_leaf_byblk(zap, tbl[i], NULL, RW_READER, &l); if (err == 0) { zap_leaf_stats(zap, l, zs); zap_put_leaf(l); @@ -1308,14 +1277,12 @@ fzap_get_stats(zap_t *zap, zap_stats_t *zs) zap_stats_ptrtbl(zap, &ZAP_EMBEDDED_PTRTBL_ENT(zap, 0), 1 << ZAP_EMBEDDED_PTRTBL_SHIFT(zap), zs); } else { - int b; - dmu_prefetch(zap->zap_objset, zap->zap_object, 0, zap_f_phys(zap)->zap_ptrtbl.zt_blk << bs, zap_f_phys(zap)->zap_ptrtbl.zt_numblks << bs, ZIO_PRIORITY_SYNC_READ); - for (b = 0; b < zap_f_phys(zap)->zap_ptrtbl.zt_numblks; + for (int b = 0; b < zap_f_phys(zap)->zap_ptrtbl.zt_numblks; b++) { dmu_buf_t *db; int err; Modified: vendor-sys/illumos/dist/uts/common/fs/zfs/zap_leaf.c ============================================================================== --- vendor-sys/illumos/dist/uts/common/fs/zfs/zap_leaf.c Wed Aug 1 02:59:56 2018 (r337026) +++ vendor-sys/illumos/dist/uts/common/fs/zfs/zap_leaf.c Wed Aug 1 03:03:15 2018 (r337027) @@ -21,7 +21,7 @@ /* * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved. - * Copyright (c) 2013, 2015 by Delphix. All rights reserved. + * Copyright (c) 2013, 2016 by Delphix. All rights reserved. * Copyright 2017 Nexenta Systems, Inc. */ @@ -107,7 +107,6 @@ ldv(int len, const void *addr) void zap_leaf_byteswap(zap_leaf_phys_t *buf, int size) { - int i; zap_leaf_t l; dmu_buf_t l_dbuf; @@ -123,10 +122,10 @@ zap_leaf_byteswap(zap_leaf_phys_t *buf, int size) buf->l_hdr.lh_prefix_len = BSWAP_16(buf->l_hdr.lh_prefix_len); buf->l_hdr.lh_freelist = BSWAP_16(buf->l_hdr.lh_freelist); - for (i = 0; i < ZAP_LEAF_HASH_NUMENTRIES(&l); i++) + for (int i = 0; i < ZAP_LEAF_HASH_NUMENTRIES(&l); i++) buf->l_hash[i] = BSWAP_16(buf->l_hash[i]); - for (i = 0; i < ZAP_LEAF_NUMCHUNKS(&l); i++) { + for (int i = 0; i < ZAP_LEAF_NUMCHUNKS(&l); i++) { zap_leaf_chunk_t *lc = &ZAP_LEAF_CHUNK(&l, i); struct zap_leaf_entry *le; @@ -162,14 +161,12 @@ zap_leaf_byteswap(zap_leaf_phys_t *buf, int size) void zap_leaf_init(zap_leaf_t *l, boolean_t sort) { - int i; - l->l_bs = highbit64(l->l_dbuf->db_size) - 1; zap_memset(&zap_leaf_phys(l)->l_hdr, 0, sizeof (struct zap_leaf_header)); zap_memset(zap_leaf_phys(l)->l_hash, CHAIN_END, 2*ZAP_LEAF_HASH_NUMENTRIES(l)); - for (i = 0; i < ZAP_LEAF_NUMCHUNKS(l); i++) { + for (int i = 0; i < ZAP_LEAF_NUMCHUNKS(l); i++) { ZAP_LEAF_CHUNK(l, i).l_free.lf_type = ZAP_CHUNK_FREE; ZAP_LEAF_CHUNK(l, i).l_free.lf_next = i+1; } @@ -188,11 +185,9 @@ zap_leaf_init(zap_leaf_t *l, boolean_t sort) static uint16_t zap_leaf_chunk_alloc(zap_leaf_t *l) { - int chunk; - ASSERT(zap_leaf_phys(l)->l_hdr.lh_nfree > 0); - chunk = zap_leaf_phys(l)->l_hdr.lh_freelist; + int chunk = zap_leaf_phys(l)->l_hdr.lh_freelist; ASSERT3U(chunk, <, ZAP_LEAF_NUMCHUNKS(l)); ASSERT3U(ZAP_LEAF_CHUNK(l, chunk).l_free.lf_type, ==, ZAP_CHUNK_FREE); @@ -232,7 +227,7 @@ zap_leaf_array_create(zap_leaf_t *l, const char *buf, uint16_t *chunkp = &chunk_head; int byten = 0; uint64_t value = 0; - int shift = (integer_size-1)*8; + int shift = (integer_size - 1) * 8; int len = num_integers; ASSERT3U(num_integers * integer_size, <, MAX_ARRAY_BYTES); @@ -240,10 +235,9 @@ zap_leaf_array_create(zap_leaf_t *l, const char *buf, while (len > 0) { uint16_t chunk = zap_leaf_chunk_alloc(l); struct zap_leaf_array *la = &ZAP_LEAF_CHUNK(l, chunk).l_array; - int i; la->la_type = ZAP_CHUNK_ARRAY; - for (i = 
0; i < ZAP_LEAF_ARRAY_BYTES; i++) { + for (int i = 0; i < ZAP_LEAF_ARRAY_BYTES; i++) { if (byten == 0) value = ldv(integer_size, buf); la->la_array[i] = value >> shift; @@ -321,10 +315,9 @@ zap_leaf_array_read(zap_leaf_t *l, uint16_t chunk, while (len > 0) { struct zap_leaf_array *la = &ZAP_LEAF_CHUNK(l, chunk).l_array; - int i; ASSERT3U(chunk, <, ZAP_LEAF_NUMCHUNKS(l)); - for (i = 0; i < ZAP_LEAF_ARRAY_BYTES && len > 0; i++) { + for (int i = 0; i < ZAP_LEAF_ARRAY_BYTES && len > 0; i++) { value = (value << 8) | la->la_array[i]; byten++; if (byten == array_int_len) { @@ -347,16 +340,13 @@ zap_leaf_array_match(zap_leaf_t *l, zap_name_t *zn, int bseen = 0; if (zap_getflags(zn->zn_zap) & ZAP_FLAG_UINT64_KEY) { - uint64_t *thiskey; - boolean_t match; - + uint64_t *thiskey = + kmem_alloc(array_numints * sizeof (*thiskey), KM_SLEEP); ASSERT(zn->zn_key_intlen == sizeof (*thiskey)); - thiskey = kmem_alloc(array_numints * sizeof (*thiskey), - KM_SLEEP); zap_leaf_array_read(l, chunk, sizeof (*thiskey), array_numints, sizeof (*thiskey), array_numints, thiskey); - match = bcmp(thiskey, zn->zn_key_orig, + boolean_t match = bcmp(thiskey, zn->zn_key_orig, array_numints * sizeof (*thiskey)) == 0; kmem_free(thiskey, array_numints * sizeof (*thiskey)); return (match); @@ -365,11 +355,10 @@ zap_leaf_array_match(zap_leaf_t *l, zap_name_t *zn, ASSERT(zn->zn_key_intlen == 1); if (zn->zn_matchtype & MT_NORMALIZE) { char *thisname = kmem_alloc(array_numints, KM_SLEEP); - boolean_t match; zap_leaf_array_read(l, chunk, sizeof (char), array_numints, sizeof (char), array_numints, thisname); - match = zap_match(zn, thisname); + boolean_t match = zap_match(zn, thisname); kmem_free(thisname, array_numints); return (match); } @@ -400,12 +389,11 @@ zap_leaf_array_match(zap_leaf_t *l, zap_name_t *zn, int zap_leaf_lookup(zap_leaf_t *l, zap_name_t *zn, zap_entry_handle_t *zeh) { - uint16_t *chunkp; struct zap_leaf_entry *le; ASSERT3U(zap_leaf_phys(l)->l_hdr.lh_magic, ==, ZAP_LEAF_MAGIC); - for (chunkp = LEAF_HASH_ENTPTR(l, zn->zn_hash); + for (uint16_t *chunkp = LEAF_HASH_ENTPTR(l, zn->zn_hash); *chunkp != CHAIN_END; chunkp = &le->le_next) { uint16_t chunk = *chunkp; le = ZAP_LEAF_ENTRY(l, chunk); @@ -446,17 +434,15 @@ int zap_leaf_lookup_closest(zap_leaf_t *l, uint64_t h, uint32_t cd, zap_entry_handle_t *zeh) { - uint16_t chunk; uint64_t besth = -1ULL; uint32_t bestcd = -1U; uint16_t bestlh = ZAP_LEAF_HASH_NUMENTRIES(l)-1; - uint16_t lh; struct zap_leaf_entry *le; ASSERT3U(zap_leaf_phys(l)->l_hdr.lh_magic, ==, ZAP_LEAF_MAGIC); - for (lh = LEAF_HASH(l, h); lh <= bestlh; lh++) { - for (chunk = zap_leaf_phys(l)->l_hash[lh]; + for (uint16_t lh = LEAF_HASH(l, h); lh <= bestlh; lh++) { + for (uint16_t chunk = zap_leaf_phys(l)->l_hash[lh]; chunk != CHAIN_END; chunk = le->le_next) { le = ZAP_LEAF_ENTRY(l, chunk); @@ -529,11 +515,10 @@ int zap_entry_update(zap_entry_handle_t *zeh, uint8_t integer_size, uint64_t num_integers, const void *buf) { - int delta_chunks; zap_leaf_t *l = zeh->zeh_leaf; struct zap_leaf_entry *le = ZAP_LEAF_ENTRY(l, *zeh->zeh_chunkp); - delta_chunks = ZAP_LEAF_ARRAY_NCHUNKS(num_integers * integer_size) - + int delta_chunks = ZAP_LEAF_ARRAY_NCHUNKS(num_integers * integer_size) - ZAP_LEAF_ARRAY_NCHUNKS(le->le_value_numints * le->le_value_intlen); if ((int)zap_leaf_phys(l)->l_hdr.lh_nfree < delta_chunks) @@ -550,14 +535,12 @@ zap_entry_update(zap_entry_handle_t *zeh, void zap_entry_remove(zap_entry_handle_t *zeh) { - uint16_t entry_chunk; - struct zap_leaf_entry *le; zap_leaf_t *l = zeh->zeh_leaf; 
ASSERT3P(zeh->zeh_chunkp, !=, &zeh->zeh_fakechunk); - entry_chunk = *zeh->zeh_chunkp; - le = ZAP_LEAF_ENTRY(l, entry_chunk); + uint16_t entry_chunk = *zeh->zeh_chunkp; + struct zap_leaf_entry *le = ZAP_LEAF_ENTRY(l, entry_chunk); ASSERT3U(le->le_type, ==, ZAP_CHUNK_ENTRY); zap_leaf_array_free(l, &le->le_name_chunk); @@ -575,15 +558,12 @@ zap_entry_create(zap_leaf_t *l, zap_name_t *zn, uint32 zap_entry_handle_t *zeh) { uint16_t chunk; - uint16_t *chunkp; struct zap_leaf_entry *le; - uint64_t valuelen; - int numchunks; uint64_t h = zn->zn_hash; - valuelen = integer_size * num_integers; + uint64_t valuelen = integer_size * num_integers; - numchunks = 1 + ZAP_LEAF_ARRAY_NCHUNKS(zn->zn_key_orig_numints * + int numchunks = 1 + ZAP_LEAF_ARRAY_NCHUNKS(zn->zn_key_orig_numints * zn->zn_key_intlen) + ZAP_LEAF_ARRAY_NCHUNKS(valuelen); if (numchunks > ZAP_LEAF_NUMCHUNKS(l)) return (E2BIG); @@ -645,7 +625,7 @@ zap_entry_create(zap_leaf_t *l, zap_name_t *zn, uint32 /* link it into the hash chain */ /* XXX if we did the search above, we could just use that */ - chunkp = zap_leaf_rehash_entry(l, chunk); + uint16_t *chunkp = zap_leaf_rehash_entry(l, chunk); zap_leaf_phys(l)->l_hdr.lh_nentries++; @@ -673,14 +653,13 @@ boolean_t zap_entry_normalization_conflict(zap_entry_handle_t *zeh, zap_name_t *zn, const char *name, zap_t *zap) { - uint64_t chunk; struct zap_leaf_entry *le; boolean_t allocdzn = B_FALSE; if (zap->zap_normflags == 0) return (B_FALSE); - for (chunk = *LEAF_HASH_ENTPTR(zeh->zeh_leaf, zeh->zeh_hash); + for (uint16_t chunk = *LEAF_HASH_ENTPTR(zeh->zeh_leaf, zeh->zeh_hash); chunk != CHAIN_END; chunk = le->le_next) { le = ZAP_LEAF_ENTRY(zeh->zeh_leaf, chunk); if (le->le_hash != zeh->zeh_hash) @@ -763,14 +742,11 @@ zap_leaf_transfer_array(zap_leaf_t *l, uint16_t chunk, static void zap_leaf_transfer_entry(zap_leaf_t *l, int entry, zap_leaf_t *nl) { - struct zap_leaf_entry *le, *nle; - uint16_t chunk; - - le = ZAP_LEAF_ENTRY(l, entry); + struct zap_leaf_entry *le = ZAP_LEAF_ENTRY(l, entry); ASSERT3U(le->le_type, ==, ZAP_CHUNK_ENTRY); - chunk = zap_leaf_chunk_alloc(nl); - nle = ZAP_LEAF_ENTRY(nl, chunk); + uint16_t chunk = zap_leaf_chunk_alloc(nl); + struct zap_leaf_entry *nle = ZAP_LEAF_ENTRY(nl, chunk); *nle = *le; /* structure assignment */ (void) zap_leaf_rehash_entry(nl, chunk); @@ -791,7 +767,6 @@ zap_leaf_transfer_entry(zap_leaf_t *l, int entry, zap_ void zap_leaf_split(zap_leaf_t *l, zap_leaf_t *nl, boolean_t sort) { - int i; int bit = 64 - 1 - zap_leaf_phys(l)->l_hdr.lh_prefix_len; /* set new prefix and prefix_len */ @@ -818,7 +793,7 @@ zap_leaf_split(zap_leaf_t *l, zap_leaf_t *nl, boolean_ * but this accesses memory more sequentially, and when we're * called, the block is usually pretty full. 
*/ - for (i = 0; i < ZAP_LEAF_NUMCHUNKS(l); i++) { + for (int i = 0; i < ZAP_LEAF_NUMCHUNKS(l); i++) { struct zap_leaf_entry *le = ZAP_LEAF_ENTRY(l, i); if (le->le_type != ZAP_CHUNK_ENTRY) continue; @@ -833,9 +808,7 @@ zap_leaf_split(zap_leaf_t *l, zap_leaf_t *nl, boolean_ void zap_leaf_stats(zap_t *zap, zap_leaf_t *l, zap_stats_t *zs) { - int i, n; - - n = zap_f_phys(zap)->zap_ptrtbl.zt_shift - + int n = zap_f_phys(zap)->zap_ptrtbl.zt_shift - zap_leaf_phys(l)->l_hdr.lh_prefix_len; n = MIN(n, ZAP_HISTOGRAM_SIZE-1); zs->zs_leafs_with_2n_pointers[n]++; @@ -851,7 +824,7 @@ zap_leaf_stats(zap_t *zap, zap_leaf_t *l, zap_stats_t n = MIN(n, ZAP_HISTOGRAM_SIZE-1); zs->zs_blocks_n_tenths_full[n]++; - for (i = 0; i < ZAP_LEAF_HASH_NUMENTRIES(l); i++) { + for (int i = 0; i < ZAP_LEAF_HASH_NUMENTRIES(l); i++) { int nentries = 0; int chunk = zap_leaf_phys(l)->l_hash[i]; Modified: vendor-sys/illumos/dist/uts/common/fs/zfs/zap_micro.c ============================================================================== --- vendor-sys/illumos/dist/uts/common/fs/zfs/zap_micro.c Wed Aug 1 02:59:56 2018 (r337026) +++ vendor-sys/illumos/dist/uts/common/fs/zfs/zap_micro.c Wed Aug 1 03:03:15 2018 (r337027) @@ -21,7 +21,7 @@ /* * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved. - * Copyright (c) 2011, 2016 by Delphix. All rights reserved. + * Copyright (c) 2011, 2017 by Delphix. All rights reserved. * Copyright (c) 2014 Spectra Logic Corporation, All rights reserved. * Copyright (c) 2014 Integros [integros.com] * Copyright 2017 Nexenta Systems, Inc. @@ -89,22 +89,20 @@ zap_hash(zap_name_t *zn) ASSERT(zfs_crc64_table[128] == ZFS_CRC64_POLY); if (zap_getflags(zap) & ZAP_FLAG_UINT64_KEY) { *** DIFF OUTPUT TRUNCATED AT 1000 LINES ***
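
[Editor's note, not part of the commit:] The dmu_object_zapify() hunk above is the heart of the 9329 fix: mzap_create_impl() now runs before dn_type is switched to DMU_OTN_ZAP_METADATA, so a concurrent caller that keys off the type via *_is_zapified() can never see a zapified type while the ZAP data is still uninitialized. Below is a minimal stand-alone sketch of that publish-after-initialize pattern; the toy_* names are hypothetical, and the C11 atomics merely stand in for the ordering guarantees that the DMU transaction and dnode machinery provide in the real code.

	#include <stdatomic.h>
	#include <stdio.h>

	#define	TYPE_PLAIN	1	/* stands in for the pre-zapify object type */
	#define	TYPE_ZAP	2	/* stands in for DMU_OTN_ZAP_METADATA */

	struct toy_object {
		_Atomic int	to_type;	/* stands in for dn_type */
		int		to_zap_data;	/* stands in for the mzap block */
	};

	/* Writer: order matters -- initialize the data, then publish the type. */
	static void
	toy_zapify(struct toy_object *obj)
	{
		obj->to_zap_data = 42;			/* "mzap_create_impl()" step */
		atomic_store_explicit(&obj->to_type,	/* "dn_type = DMU_OTN_ZAP_METADATA" step */
		    TYPE_ZAP, memory_order_release);
	}

	/* Reader: decides whether the object is zapified from the type alone. */
	static int
	toy_is_zapified(struct toy_object *obj)
	{
		return (atomic_load_explicit(&obj->to_type,
		    memory_order_acquire) == TYPE_ZAP);
	}

	int
	main(void)
	{
		struct toy_object obj = { .to_type = TYPE_PLAIN, .to_zap_data = 0 };

		toy_zapify(&obj);
		if (toy_is_zapified(&obj))
			printf("safe to read zap data: %d\n", obj.to_zap_data);
		return (0);
	}

With the pre-9329 ordering (type published first, data created afterwards), a reader could pass the toy_is_zapified() check and then operate on an object whose ZAP block did not exist yet, which is how the zap_leaf_lookup() panic arose.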