From: Konstantin Belousov <kib@FreeBSD.org>
Date: Wed, 16 Jan 2019 06:10:55 +0000 (UTC)
To: src-committers@freebsd.org, svn-src-all@freebsd.org, svn-src-head@freebsd.org
Subject: svn commit: r343087 - head/sys/x86/x86
Message-Id: <201901160610.x0G6Atlp006349@repo.freebsd.org>

Author: kib
Date: Wed Jan 16 06:10:55 2019
New Revision: 343087
URL: https://svnweb.freebsd.org/changeset/base/343087

Log:
  Style(9) fixes for x86/busdma_bounce.c.  Remove extra parentheses.
  Adjust indents and lines fill.

  Sponsored by:	The FreeBSD Foundation
  MFC after:	1 week
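For readers who want the two patterns in isolation, the short, self-contained
C sketch below mirrors the kinds of edits made in this commit: parentheses
that are redundant around relational subexpressions are dropped, and
continuation lines use a plain four-space indent instead of being aligned
under the opening parenthesis.  The names (COULD_BOUNCE, could_bounce_old/_new)
are made up for illustration and do not come from busdma_bounce.c; the diff
below is the authoritative change.

#include <stdio.h>

#define	COULD_BOUNCE	0x01

/* Old style: continuation aligned under the opening parenthesis. */
static int could_bounce_old(unsigned int flags, unsigned int lowaddr,
			    unsigned int maxmem);

/* New style: a single four-space continuation indent. */
static int could_bounce_new(unsigned int flags, unsigned int lowaddr,
    unsigned int maxmem);

static int
could_bounce_old(unsigned int flags, unsigned int lowaddr, unsigned int maxmem)
{

	/* Extra parentheses around each relational term. */
	if (((flags & COULD_BOUNCE) != 0) && (lowaddr < maxmem))
		return (1);
	return (0);
}

static int
could_bounce_new(unsigned int flags, unsigned int lowaddr, unsigned int maxmem)
{

	/* Same expression; && already binds looser than != and <. */
	if ((flags & COULD_BOUNCE) != 0 && lowaddr < maxmem)
		return (1);
	return (0);
}

int
main(void)
{

	/* Both variants are equivalent; this prints "1 1". */
	printf("%d %d\n", could_bounce_old(COULD_BOUNCE, 1, 2),
	    could_bounce_new(COULD_BOUNCE, 1, 2));
	return (0);
}

The two bodies are semantically identical in this sketch; only the formatting
differs, which is the point of a style(9)-only change.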
Modified:
  head/sys/x86/x86/busdma_bounce.c

Modified: head/sys/x86/x86/busdma_bounce.c
==============================================================================
--- head/sys/x86/x86/busdma_bounce.c	Wed Jan 16 05:51:03 2019	(r343086)
+++ head/sys/x86/x86/busdma_bounce.c	Wed Jan 16 06:10:55 2019	(r343087)
@@ -137,19 +137,16 @@ static void init_bounce_pages(void *dummy);
 static int alloc_bounce_zone(bus_dma_tag_t dmat);
 static int alloc_bounce_pages(bus_dma_tag_t dmat, u_int numpages);
 static int reserve_bounce_pages(bus_dma_tag_t dmat, bus_dmamap_t map,
-				int commit);
+    int commit);
 static bus_addr_t add_bounce_page(bus_dma_tag_t dmat, bus_dmamap_t map,
-				  vm_offset_t vaddr, bus_addr_t addr1,
-				  bus_addr_t addr2, bus_size_t size);
+    vm_offset_t vaddr, bus_addr_t addr1, bus_addr_t addr2, bus_size_t size);
 static void free_bounce_page(bus_dma_tag_t dmat, struct bounce_page *bpage);
 static void _bus_dmamap_count_pages(bus_dma_tag_t dmat, bus_dmamap_t map,
-				    pmap_t pmap, void *buf, bus_size_t buflen,
-				    int flags);
+    pmap_t pmap, void *buf, bus_size_t buflen, int flags);
 static void _bus_dmamap_count_phys(bus_dma_tag_t dmat, bus_dmamap_t map,
-				   vm_paddr_t buf, bus_size_t buflen,
-				   int flags);
+    vm_paddr_t buf, bus_size_t buflen, int flags);
 static int _bus_dmamap_reserve_pages(bus_dma_tag_t dmat, bus_dmamap_t map,
-				     int flags);
+    int flags);
 
 static int
 bounce_bus_dma_zone_setup(bus_dma_tag_t dmat)
@@ -202,15 +199,15 @@ bounce_bus_dma_tag_create(bus_dma_tag_t parent, bus_si
 	newtag->map_count = 0;
 	newtag->segments = NULL;
 
-	if (parent != NULL && ((newtag->common.filter != NULL) ||
-	    ((parent->bounce_flags & BUS_DMA_COULD_BOUNCE) != 0)))
+	if (parent != NULL && (newtag->common.filter != NULL ||
+	    (parent->bounce_flags & BUS_DMA_COULD_BOUNCE) != 0))
 		newtag->bounce_flags |= BUS_DMA_COULD_BOUNCE;
 
 	if (newtag->common.lowaddr < ptoa((vm_paddr_t)Maxmem) ||
 	    newtag->common.alignment > 1)
 		newtag->bounce_flags |= BUS_DMA_COULD_BOUNCE;
 
-	if (((newtag->bounce_flags & BUS_DMA_COULD_BOUNCE) != 0) &&
+	if ((newtag->bounce_flags & BUS_DMA_COULD_BOUNCE) != 0 &&
 	    (flags & BUS_DMA_ALLOCNOW) != 0)
 		error = bounce_bus_dma_zone_setup(newtag);
 	else
@@ -309,7 +306,7 @@ bounce_bus_dmamap_create(bus_dma_tag_t dmat, int flags
 	 * exclusion region, a data alignment that is stricter than 1, and/or
 	 * an active address boundary.
 	 */
-	if (dmat->bounce_flags & BUS_DMA_COULD_BOUNCE) {
+	if ((dmat->bounce_flags & BUS_DMA_COULD_BOUNCE) != 0) {
 		/* Must bounce */
 		if (dmat->bounce_zone == NULL) {
 			if ((error = alloc_bounce_zone(dmat)) != 0)
@@ -448,14 +445,15 @@ bounce_bus_dmamem_alloc(bus_dma_tag_t dmat, void** vad
 	 *
 	 * In the meantime warn the user if malloc gets it wrong.
 	 */
-	if ((dmat->common.maxsize <= PAGE_SIZE) &&
-	   (dmat->common.alignment <= dmat->common.maxsize) &&
+	if (dmat->common.maxsize <= PAGE_SIZE &&
+	    dmat->common.alignment <= dmat->common.maxsize &&
 	    dmat->common.lowaddr >= ptoa((vm_paddr_t)Maxmem) &&
 	    attr == VM_MEMATTR_DEFAULT) {
 		*vaddr = malloc_domainset(dmat->common.maxsize, M_DEVBUF,
 		    DOMAINSET_PREF(dmat->common.domain), mflags);
 	} else if (dmat->common.nsegments >=
-	    howmany(dmat->common.maxsize, MIN(dmat->common.maxsegsz, PAGE_SIZE)) &&
+	    howmany(dmat->common.maxsize, MIN(dmat->common.maxsegsz,
+	    PAGE_SIZE)) &&
 	    dmat->common.alignment <= PAGE_SIZE &&
 	    (dmat->common.boundary % PAGE_SIZE) == 0) {
 		/* Page-based multi-segment allocations allowed */
@@ -512,7 +510,7 @@ _bus_dmamap_count_phys(bus_dma_tag_t dmat, bus_dmamap_
 	bus_addr_t curaddr;
 	bus_size_t sgsize;
 
-	if ((map != &nobounce_dmamap && map->pagesneeded == 0)) {
+	if (map != &nobounce_dmamap && map->pagesneeded == 0) {
 		/*
 		 * Count the number of bounce pages
 		 * needed in order to complete this transfer
@@ -541,7 +539,7 @@ _bus_dmamap_count_pages(bus_dma_tag_t dmat, bus_dmamap
 	bus_addr_t paddr;
 	bus_size_t sg_len;
 
-	if ((map != &nobounce_dmamap && map->pagesneeded == 0)) {
+	if (map != &nobounce_dmamap && map->pagesneeded == 0) {
 		CTR4(KTR_BUSDMA, "lowaddr= %d Maxmem= %d, boundary= %d, "
 		    "alignment= %d", dmat->common.lowaddr,
 		    ptoa((vm_paddr_t)Maxmem),
@@ -580,7 +578,7 @@ _bus_dmamap_count_ma(bus_dma_tag_t dmat, bus_dmamap_t 
 	int page_index;
 	vm_paddr_t paddr;
 
-	if ((map != &nobounce_dmamap && map->pagesneeded == 0)) {
+	if (map != &nobounce_dmamap && map->pagesneeded == 0) {
 		CTR4(KTR_BUSDMA, "lowaddr= %d Maxmem= %d, boundary= %d, "
 		    "alignment= %d", dmat->common.lowaddr,
 		    ptoa((vm_paddr_t)Maxmem),
@@ -718,7 +716,7 @@ bounce_bus_dmamap_load_phys(bus_dma_tag_t dmat, bus_dm
 	while (buflen > 0) {
 		curaddr = buf;
 		sgsize = MIN(buflen, dmat->common.maxsegsz);
-		if (((dmat->bounce_flags & BUS_DMA_COULD_BOUNCE) != 0) &&
+		if ((dmat->bounce_flags & BUS_DMA_COULD_BOUNCE) != 0 &&
 		    map->pagesneeded != 0 &&
 		    bus_dma_run_filter(&dmat->common, curaddr)) {
 			sgsize = MIN(sgsize, PAGE_SIZE - (curaddr & PAGE_MASK));
@@ -786,7 +784,7 @@ bounce_bus_dmamap_load_buffer(bus_dma_tag_t dmat, bus_
 		 */
 		max_sgsize = MIN(buflen, dmat->common.maxsegsz);
 		sgsize = PAGE_SIZE - (curaddr & PAGE_MASK);
-		if (((dmat->bounce_flags & BUS_DMA_COULD_BOUNCE) != 0) &&
+		if ((dmat->bounce_flags & BUS_DMA_COULD_BOUNCE) != 0 &&
 		    map->pagesneeded != 0 &&
 		    bus_dma_run_filter(&dmat->common, curaddr)) {
 			sgsize = roundup2(sgsize, dmat->common.alignment);
@@ -853,7 +851,7 @@ bounce_bus_dmamap_load_ma(bus_dma_tag_t dmat, bus_dmam
 		paddr = VM_PAGE_TO_PHYS(ma[page_index]) + ma_offs;
 		max_sgsize = MIN(buflen, dmat->common.maxsegsz);
 		sgsize = PAGE_SIZE - ma_offs;
-		if (((dmat->bounce_flags & BUS_DMA_COULD_BOUNCE) != 0) &&
+		if ((dmat->bounce_flags & BUS_DMA_COULD_BOUNCE) != 0 &&
 		    map->pagesneeded != 0 &&
 		    bus_dma_run_filter(&dmat->common, paddr)) {
 			sgsize = roundup2(sgsize, dmat->common.alignment);
@@ -1066,9 +1064,9 @@ alloc_bounce_zone(bus_dma_tag_t dmat)
 
 	/* Check to see if we already have a suitable zone */
 	STAILQ_FOREACH(bz, &bounce_zone_list, links) {
-		if ((dmat->common.alignment <= bz->alignment) &&
-		    (dmat->common.lowaddr >= bz->lowaddr) &&
-		    (dmat->common.domain == bz->domain)) {
+		if (dmat->common.alignment <= bz->alignment &&
+		    dmat->common.lowaddr >= bz->lowaddr &&
+		    dmat->common.domain == bz->domain) {
 			dmat->bounce_zone = bz;
 			return (0);
 		}
@@ -1196,7 +1194,7 @@ reserve_bounce_pages(bus_dma_tag_t dmat, bus_dmamap_t 
 
 static bus_addr_t
 add_bounce_page(bus_dma_tag_t dmat, bus_dmamap_t map, vm_offset_t vaddr,
-		bus_addr_t addr1, bus_addr_t addr2, bus_size_t size)
+    bus_addr_t addr1, bus_addr_t addr2, bus_size_t size)
 {
 	struct bounce_zone *bz;
 	struct bounce_page *bpage;