Date: Tue, 1 Sep 2020 21:20:46 +0000 (UTC)
From: Mateusz Guzik <mjg@FreeBSD.org>
To: src-committers@freebsd.org, svn-src-all@freebsd.org, svn-src-head@freebsd.org
Subject: svn commit: r365074 - head/sys/vm
Message-ID: <202009012120.081LKk9R019184@repo.freebsd.org>
Author: mjg
Date: Tue Sep  1 21:20:45 2020
New Revision: 365074
URL: https://svnweb.freebsd.org/changeset/base/365074

Log:
  vm: clean up empty lines in .c and .h files

Modified:
  head/sys/vm/default_pager.c
  head/sys/vm/memguard.c
  head/sys/vm/redzone.c
  head/sys/vm/sg_pager.c
  head/sys/vm/swap_pager.c
  head/sys/vm/uma.h
  head/sys/vm/uma_core.c
  head/sys/vm/uma_int.h
  head/sys/vm/vm.h
  head/sys/vm/vm_fault.c
  head/sys/vm/vm_map.c
  head/sys/vm/vm_meter.c
  head/sys/vm/vm_mmap.c
  head/sys/vm/vm_object.c
  head/sys/vm/vm_pager.h
  head/sys/vm/vm_phys.h
  head/sys/vm/vm_radix.c
  head/sys/vm/vnode_pager.c

Modified: head/sys/vm/default_pager.c
==============================================================================
--- head/sys/vm/default_pager.c	Tue Sep  1 21:20:08 2020	(r365073)
+++ head/sys/vm/default_pager.c	Tue Sep  1 21:20:45 2020	(r365074)
@@ -154,4 +154,3 @@ default_pager_haspage(vm_object_t object, vm_pindex_t
 	/* An OBJT_DEFAULT object has no backing store. */
 	return (FALSE);
 }
-

Modified: head/sys/vm/memguard.c
==============================================================================
--- head/sys/vm/memguard.c	Tue Sep  1 21:20:08 2020	(r365073)
+++ head/sys/vm/memguard.c	Tue Sep  1 21:20:45 2020	(r365074)
@@ -158,7 +158,6 @@ SYSCTL_UINT(_vm_memguard, OID_AUTO, frequency, CTLFLAG
 SYSCTL_ULONG(_vm_memguard, OID_AUTO, frequency_hits, CTLFLAG_RD,
     &memguard_frequency_hits, 0, "# times MemGuard randomly chose");
 
-
 /*
  * Return a fudged value to be used for vm_kmem_size for allocating
  * the kernel_arena.

Modified: head/sys/vm/redzone.c
==============================================================================
--- head/sys/vm/redzone.c	Tue Sep  1 21:20:08 2020	(r365073)
+++ head/sys/vm/redzone.c	Tue Sep  1 21:20:45 2020	(r365074)
@@ -37,7 +37,6 @@ __FBSDID("$FreeBSD$");
 
 #include <vm/redzone.h>
 
-
 static SYSCTL_NODE(_vm, OID_AUTO, redzone, CTLFLAG_RW | CTLFLAG_MPSAFE, NULL,
     "RedZone data");
 static u_long redzone_extra_mem = 0;

Modified: head/sys/vm/sg_pager.c
==============================================================================
--- head/sys/vm/sg_pager.c	Tue Sep  1 21:20:08 2020	(r365073)
+++ head/sys/vm/sg_pager.c	Tue Sep  1 21:20:45 2020	(r365074)
@@ -134,7 +134,7 @@ sg_pager_dealloc(vm_object_t object)
 		TAILQ_REMOVE(&object->un_pager.sgp.sgp_pglist, m, plinks.q);
 		vm_page_putfake(m);
 	}
-	
+
 	sg = object->handle;
 	sglist_free(sg);
 	object->handle = NULL;

Modified: head/sys/vm/swap_pager.c
==============================================================================
--- head/sys/vm/swap_pager.c	Tue Sep  1 21:20:08 2020	(r365073)
+++ head/sys/vm/swap_pager.c	Tue Sep  1 21:20:45 2020	(r365074)
@@ -865,7 +865,6 @@ swp_pager_strategy(struct buf *bp)
 	panic("Swapdev not found");
 }
 
-
 /*
  * SWP_PAGER_FREESWAPSPACE() - free raw swap space
 *
@@ -2744,7 +2743,6 @@ static struct g_class g_swap_class = {
 
 DECLARE_GEOM_CLASS(g_swap_class, g_class);
 
-
 static void
 swapgeom_close_ev(void *arg, int flags)
 {
@@ -3007,7 +3005,6 @@ swapdev_close(struct thread *td, struct swdevt *sp)
 	VOP_CLOSE(sp->sw_vp, FREAD | FWRITE, td->td_ucred, td);
 	vrele(sp->sw_vp);
 }
 
-
 static int
 swaponvp(struct thread *td, struct vnode *vp, u_long nblks)

Modified: head/sys/vm/uma.h
==============================================================================
--- head/sys/vm/uma.h	Tue Sep  1 21:20:08 2020	(r365073)
+++ head/sys/vm/uma.h	Tue Sep  1 21:20:45 2020	(r365074)
@@ -154,7 +154,6 @@ typedef void (*uma_release)(void *arg, void **store, i
  *
  */
 
-
 /* Function proto types */
 
 /*

Modified: head/sys/vm/uma_core.c
==============================================================================
--- head/sys/vm/uma_core.c	Tue Sep  1 21:20:08 2020	(r365073)
+++ head/sys/vm/uma_core.c	Tue Sep  1 21:20:45 2020	(r365074)
@@ -1892,7 +1892,6 @@ pcpu_page_free(void *mem, vm_size_t size, uint8_t flag
 	kva_free(sva, size);
 }
 
-
 /*
  * Zero fill initializer
  *
@@ -4975,7 +4974,6 @@ uma_vm_zone_stats(struct uma_type_header *uth, uma_zon
 	uma_zone_domain_t zdom;
 	uma_cache_t cache;
 	int i;
 
-
 	for (i = 0; i < vm_ndomains; i++) {
 		zdom = ZDOM_GET(z, i);

Modified: head/sys/vm/uma_int.h
==============================================================================
--- head/sys/vm/uma_int.h	Tue Sep  1 21:20:08 2020	(r365073)
+++ head/sys/vm/uma_int.h	Tue Sep  1 21:20:45 2020	(r365074)
@@ -307,14 +307,14 @@ cache_uz_flags(uma_cache_t cache)
 
 	return (cache->uc_freebucket.ucb_spare);
 }
-	
+
 static inline uint32_t
 cache_uz_size(uma_cache_t cache)
 {
 
 	return (cache->uc_allocbucket.ucb_spare);
 }
-	
+
 /*
  * Per-domain slab lists. Embedded in the kegs.
  */

Modified: head/sys/vm/vm.h
==============================================================================
--- head/sys/vm/vm.h	Tue Sep  1 21:20:08 2020	(r365073)
+++ head/sys/vm/vm.h	Tue Sep  1 21:20:45 2020	(r365074)
@@ -163,4 +163,3 @@ void swapper(void);
 
 #endif
 #endif				/* VM_H */
-

Modified: head/sys/vm/vm_fault.c
==============================================================================
--- head/sys/vm/vm_fault.c	Tue Sep  1 21:20:08 2020	(r365073)
+++ head/sys/vm/vm_fault.c	Tue Sep  1 21:20:45 2020	(r365074)
@@ -872,7 +872,6 @@ vm_fault_cow(struct faultstate *fs)
 	    (is_first_object_locked = VM_OBJECT_TRYWLOCK(fs->first_object)) &&
 	    fs->object == fs->first_object->backing_object &&
 	    VM_OBJECT_TRYWLOCK(fs->object)) {
-
 		/*
 		 * Remove but keep xbusy for replace. fs->m is moved into
 		 * fs->first_object and left busy while fs->first_m is
@@ -1010,7 +1009,6 @@ vm_fault_allocate(struct faultstate *fs)
 	struct domainset *dset;
 	int alloc_req;
 	int rv;
 
-
 	if ((fs->object->flags & OBJ_SIZEVNLOCK) != 0) {
 		rv = vm_fault_lock_vnode(fs, true);

Modified: head/sys/vm/vm_map.c
==============================================================================
--- head/sys/vm/vm_map.c	Tue Sep  1 21:20:08 2020	(r365073)
+++ head/sys/vm/vm_map.c	Tue Sep  1 21:20:45 2020	(r365074)
@@ -2940,7 +2940,6 @@ vm_map_madvise(
 	return (0);
 }
 
-
 /*
  *	vm_map_inherit:
  *
@@ -3235,7 +3234,6 @@ vm_map_wire(vm_map_t map, vm_offset_t start, vm_offset
 	return (rv);
 }
 
-
 /*
  *	vm_map_wire_locked:
 *
@@ -3823,7 +3821,6 @@ vm_map_check_protection(vm_map_t map, vm_offset_t star
 	}
 	return (TRUE);
 }
 
-
 /*
 *

Modified: head/sys/vm/vm_meter.c
==============================================================================
--- head/sys/vm/vm_meter.c	Tue Sep  1 21:20:08 2020	(r365073)
+++ head/sys/vm/vm_meter.c	Tue Sep  1 21:20:45 2020	(r365074)
@@ -124,7 +124,7 @@ SYSCTL_UINT(_vm, OID_AUTO, v_free_severe,
 
 static int
 sysctl_vm_loadavg(SYSCTL_HANDLER_ARGS)
 {
-	
+
 #ifdef SCTL_MASK32
 	u_int32_t la[4];

Modified: head/sys/vm/vm_mmap.c
==============================================================================
--- head/sys/vm/vm_mmap.c	Tue Sep  1 21:20:08 2020	(r365073)
+++ head/sys/vm/vm_mmap.c	Tue Sep  1 21:20:45 2020	(r365074)
@@ -148,7 +148,6 @@ ogetpagesize(struct thread *td, struct ogetpagesize_ar
 }
 #endif				/* COMPAT_43 */
 
-
 /*
  * Memory Map (mmap) system call. Note that the file offset
  * and address are allowed to be NOT page aligned, though if
@@ -257,7 +256,7 @@ kern_mmap_req(struct thread *td, const struct mmap_req
 	 * Ignore old flags that used to be defined but did not do anything.
 	 */
 	flags &= ~(MAP_RESERVED0020 | MAP_RESERVED0040);
-	
+
 	/*
 	 * Enforce the constraints.
 	 * Mapping of length 0 is only allowed for old binaries.
@@ -498,7 +497,6 @@ ommap(struct thread *td, struct ommap_args *uap)
 }
 #endif				/* COMPAT_43 */
 
-
 #ifndef _SYS_SYSPROTO_H_
 struct msync_args {
 	void *addr;
@@ -846,7 +844,6 @@ RestartScan:
 	 */
 	lastvecindex = -1;
 	while (entry->start < end) {
-
 		/*
 		 * check for contiguity
 		 */

Modified: head/sys/vm/vm_object.c
==============================================================================
--- head/sys/vm/vm_object.c	Tue Sep  1 21:20:08 2020	(r365073)
+++ head/sys/vm/vm_object.c	Tue Sep  1 21:20:45 2020	(r365074)
@@ -278,7 +278,7 @@ vm_object_init(void)
 {
 	TAILQ_INIT(&vm_object_list);
 	mtx_init(&vm_object_list_mtx, "vm object_list", NULL, MTX_DEF);
-	
+
 	rw_init(&kernel_object->lock, "kernel vm object");
 	_vm_object_allocate(OBJT_PHYS, atop(VM_MAX_KERNEL_ADDRESS -
 	    VM_MIN_KERNEL_ADDRESS), OBJ_UNMANAGED, kernel_object, NULL);
@@ -556,7 +556,6 @@ vm_object_deallocate_vnode(vm_object_t object)
 	vrele(vp);
 }
 
-
 /*
  * We dropped a reference on an object and discovered that it had a
  * single remaining shadow. This is a sibling of the reference we
@@ -2269,7 +2268,6 @@ vm_object_coalesce(vm_object_t prev_object, vm_ooffset
 	 * Account for the charge.
 	 */
 	if (prev_object->cred != NULL) {
-
 		/*
 		 * If prev_object was charged, then this mapping,
 		 * although not charged now, may become writable
@@ -2434,7 +2432,6 @@ vm_object_vnode(vm_object_t object)
 	}
 	return (vp);
 }
 
-
 /*
  * Busy the vm object. This prevents new pages belonging to the object from

Modified: head/sys/vm/vm_pager.h
==============================================================================
--- head/sys/vm/vm_pager.h	Tue Sep  1 21:20:08 2020	(r365073)
+++ head/sys/vm/vm_pager.h	Tue Sep  1 21:20:45 2020	(r365074)
@@ -130,7 +130,6 @@ vm_pager_put_pages(
 	int flags,
 	int *rtvals
 ) {
-
 	VM_OBJECT_ASSERT_WLOCKED(object);
 	(*pagertab[object->type]->pgo_putpages)
 	    (object, m, count, flags, rtvals);
@@ -172,7 +171,6 @@ vm_pager_populate(vm_object_t object, vm_pindex_t pidx
 	return ((*pagertab[object->type]->pgo_populate)(object, pidx,
 	    fault_type, max_prot, first, last));
 }
 
-
 /*
  * vm_pager_page_unswapped

Modified: head/sys/vm/vm_phys.h
==============================================================================
--- head/sys/vm/vm_phys.h	Tue Sep  1 21:20:08 2020	(r365073)
+++ head/sys/vm/vm_phys.h	Tue Sep  1 21:20:45 2020	(r365074)
@@ -109,7 +109,6 @@ void vm_phys_early_startup(void);
 int vm_phys_avail_largest(void);
 vm_paddr_t vm_phys_avail_size(int i);
 
-
 /*
 *
 *	vm_phys_domain:

Modified: head/sys/vm/vm_radix.c
==============================================================================
--- head/sys/vm/vm_radix.c	Tue Sep  1 21:20:08 2020	(r365073)
+++ head/sys/vm/vm_radix.c	Tue Sep  1 21:20:45 2020	(r365074)
@@ -217,7 +217,6 @@ vm_radix_node_store(smrnode_t *p, struct vm_radix_node
     enum vm_radix_access access)
 {
-
 	switch (access) {
 	case UNSERIALIZED:
 		smr_unserialized_store(p, v, true);

Modified: head/sys/vm/vnode_pager.c
==============================================================================
--- head/sys/vm/vnode_pager.c	Tue Sep  1 21:20:08 2020	(r365073)
+++ head/sys/vm/vnode_pager.c	Tue Sep  1 21:20:45 2020	(r365074)
@@ -228,7 +228,6 @@ vnode_destroy_vobject(struct vnode *vp)
 	KASSERT(vp->v_object == NULL, ("vp %p obj %p", vp, vp->v_object));
 }
 
-
 /*
  * Allocate (or lookup) pager for a vnode.
  * Handle is a vnode pointer.