From owner-p4-projects@FreeBSD.ORG Mon Oct 22 08:53:25 2012
Date: Mon, 22 Oct 2012 08:53:24 GMT
From: Robert Watson
To: Perforce Change Reviews
Subject: PERFORCE change 218892 for review
Message-Id: <201210220853.q9M8rOlL088846@skunkworks.freebsd.org>
List-Id: p4 projects tree changes

http://p4web.freebsd.org/@@218892?ac=10

Change 218892 by rwatson@rwatson_svr_ctsrd_mipsbuild on 2012/10/22 08:52:30

    Update CheriBSD inline assembly to use CHERI ISAv2 syntax and
    semantics.  Trim some utility functions that we're not actually
    using (and that won't be required once we have compiler support).
    This involves a lot of GNU assembler constraints and clobbers,
    which generally suffer a high likelihood of incorrectness --
    follow-up commits are almost certain.

Affected files ...

.. //depot/projects/ctsrd/cheribsd/src/sys/mips/cheri/cp2.c#14 edit
.. //depot/projects/ctsrd/cheribsd/src/sys/mips/include/cheri.h#7 edit

Differences ...

==== //depot/projects/ctsrd/cheribsd/src/sys/mips/cheri/cp2.c#14 (text+ko) ====

@@ -161,290 +161,6 @@
 	cp2_capability_copy(&cf_destp->cf_pcc, &cf_srcp->cf_pcc);
 }
 
-/*
- * Functions for writing via arbitrary capability registers.  The CP2 macros
- * cannot be used this way, as they require the register number to be
- * available at compile-time, not run-time.  Once we have improved compiler
- * support for capabilities, this problem should go away.
- */
-void
-cp2_store_hword_via(u_int crn, uint64_t offset, uint16_t h)
-{
-
-	switch (crn) {
-	case 0:
-		CP2_STORE_HWORD_VIA(0, offset, h);
-		break;
-
-	case 1:
-		CP2_STORE_HWORD_VIA(1, offset, h);
-		break;
-
-	case 2:
-		CP2_STORE_HWORD_VIA(2, offset, h);
-		break;
-
-	case 3:
-		CP2_STORE_HWORD_VIA(3, offset, h);
-		break;
-
-	case 4:
-		CP2_STORE_HWORD_VIA(4, offset, h);
-		break;
-
-	case 5:
-		CP2_STORE_HWORD_VIA(5, offset, h);
-		break;
-
-	case 6:
-		CP2_STORE_HWORD_VIA(6, offset, h);
-		break;
-
-	case 7:
-		CP2_STORE_HWORD_VIA(7, offset, h);
-		break;
-
-	case 8:
-		CP2_STORE_HWORD_VIA(8, offset, h);
-		break;
-
-	case 9:
-		CP2_STORE_HWORD_VIA(9, offset, h);
-		break;
-
-	case 10:
-		CP2_STORE_HWORD_VIA(10, offset, h);
-		break;
-
-	case 11:
-		CP2_STORE_HWORD_VIA(11, offset, h);
-		break;
-
-	case 12:
-		CP2_STORE_HWORD_VIA(12, offset, h);
-		break;
-
-	case 13:
-		CP2_STORE_HWORD_VIA(13, offset, h);
-		break;
-
-	case 14:
-		CP2_STORE_HWORD_VIA(14, offset, h);
-		break;
-
-	case 15:
-		CP2_STORE_HWORD_VIA(15, offset, h);
-		break;
-
-	case 16:
-		CP2_STORE_HWORD_VIA(16, offset, h);
-		break;
-
-	case 17:
-		CP2_STORE_HWORD_VIA(17, offset, h);
-		break;
-
-	case 18:
-		CP2_STORE_HWORD_VIA(18, offset, h);
-		break;
-
-	case 19:
-		CP2_STORE_HWORD_VIA(19, offset, h);
-		break;
-
-	case 20:
-		CP2_STORE_HWORD_VIA(20, offset, h);
-		break;
-
-	case 21:
-		CP2_STORE_HWORD_VIA(21, offset, h);
-		break;
-
-	case 22:
-		CP2_STORE_HWORD_VIA(22, offset, h);
-		break;
-
-	case 23:
-		CP2_STORE_HWORD_VIA(23, offset, h);
-		break;
-
-	case 24:
-		CP2_STORE_HWORD_VIA(24, offset, h);
-		break;
-
-	case 25:
-		CP2_STORE_HWORD_VIA(25, offset, h);
-		break;
-
-	case 26:
-		CP2_STORE_HWORD_VIA(26, offset, h);
-		break;
-
-	case 27:
-		CP2_STORE_HWORD_VIA(27, offset, h);
-		break;
-
-	case 28:
-		CP2_STORE_HWORD_VIA(28, offset, h);
-		break;
-
-	case 29:
-		CP2_STORE_HWORD_VIA(29, offset, h);
-		break;
-
-	case 30:
-		CP2_STORE_HWORD_VIA(30, offset, h);
-		break;
-
-	case 31:
-		CP2_STORE_HWORD_VIA(31, offset, h);
-		break;
-
-	default:
-		/* XXXRW: Arguably should panic. */
-		break;
-	}
-}
-
-void
-cp2_store_dword_via(u_int crn, uint64_t offset, uint64_t d)
-{
-
-	switch (crn) {
-	case 0:
-		CP2_STORE_DWORD_VIA(0, offset, d);
-		break;
-
-	case 1:
-		CP2_STORE_DWORD_VIA(1, offset, d);
-		break;
-
-	case 2:
-		CP2_STORE_DWORD_VIA(2, offset, d);
-		break;
-
-	case 3:
-		CP2_STORE_DWORD_VIA(3, offset, d);
-		break;
-
-	case 4:
-		CP2_STORE_DWORD_VIA(4, offset, d);
-		break;
-
-	case 5:
-		CP2_STORE_DWORD_VIA(5, offset, d);
-		break;
-
-	case 6:
-		CP2_STORE_DWORD_VIA(6, offset, d);
-		break;
-
-	case 7:
-		CP2_STORE_DWORD_VIA(7, offset, d);
-		break;
-
-	case 8:
-		CP2_STORE_DWORD_VIA(8, offset, d);
-		break;
-
-	case 9:
-		CP2_STORE_DWORD_VIA(9, offset, d);
-		break;
-
-	case 10:
-		CP2_STORE_DWORD_VIA(10, offset, d);
-		break;
-
-	case 11:
-		CP2_STORE_DWORD_VIA(11, offset, d);
-		break;
-
-	case 12:
-		CP2_STORE_DWORD_VIA(12, offset, d);
-		break;
-
-	case 13:
-		CP2_STORE_DWORD_VIA(13, offset, d);
-		break;
-
-	case 14:
-		CP2_STORE_DWORD_VIA(14, offset, d);
-		break;
-
-	case 15:
-		CP2_STORE_DWORD_VIA(15, offset, d);
-		break;
-
-	case 16:
-		CP2_STORE_DWORD_VIA(16, offset, d);
-		break;
-
-	case 17:
-		CP2_STORE_DWORD_VIA(17, offset, d);
-		break;
-
-	case 18:
-		CP2_STORE_DWORD_VIA(18, offset, d);
-		break;
-
-	case 19:
-		CP2_STORE_DWORD_VIA(19, offset, d);
-		break;
-
-	case 20:
-		CP2_STORE_DWORD_VIA(20, offset, d);
-		break;
-
-	case 21:
-		CP2_STORE_DWORD_VIA(21, offset, d);
-		break;
-
-	case 22:
-		CP2_STORE_DWORD_VIA(22, offset, d);
-		break;
-
-	case 23:
-		CP2_STORE_DWORD_VIA(23, offset, d);
-		break;
-
-	case 24:
-		CP2_STORE_DWORD_VIA(24, offset, d);
-		break;
-
-	case 25:
-		CP2_STORE_DWORD_VIA(25, offset, d);
-		break;
-
-	case 26:
-		CP2_STORE_DWORD_VIA(26, offset, d);
-		break;
-
-	case 27:
-		CP2_STORE_DWORD_VIA(27, offset, d);
-		break;
-
-	case 28:
-		CP2_STORE_DWORD_VIA(28, offset, d);
-		break;
-
-	case 29:
-		CP2_STORE_DWORD_VIA(29, offset, d);
-		break;
-
-	case 30:
-		CP2_STORE_DWORD_VIA(30, offset, d);
-		break;
-
-	case 31:
-		CP2_STORE_DWORD_VIA(31, offset, d);
-		break;
-
-	default:
-		/* XXXRW: Arguably should panic. */
-		break;
-	}
-}
-
 void
 cheri_exec_setregs(struct thread *td)
 {

==== //depot/projects/ctsrd/cheribsd/src/sys/mips/include/cheri.h#7 (text+ko) ====

@@ -32,7 +32,9 @@
 #define	_MIPS_INCLUDE_CHERI_H_
 
 #ifdef _KERNEL
-#include <sys/systm.h>		/* CTASSERT */
+#include <sys/systm.h>		/* CTASSERT() */
+#else
+#include <assert.h>		/* assert() */
 #endif
 
 #include

@@ -40,19 +42,23 @@
 /*
  * Canonical C-language representation of a capability.
  */
-#define	CAPABILITY_SIZE	32
+#define	CHERICAP_SIZE	32
 struct chericap {
 	uint32_t	c_reserved;
-	uint32_t	c_uperms;
-	union {
-		uint64_t	c_otype;
-		uint64_t	c_eaddr;
-	} u;
+#if BYTE_ORDER == BIG_ENDIAN
+	/* XXXRW: This definitely needs some testing. */
+	uint32_t	c_unsealed:1;
+	uint32_t	c_perms:15;
+	uint32_t	_c_padding0:16;
+#else
+#error "BYTE_ORDER != BIG_ENDIAN not yet supported"
+#endif
+	uint64_t	c_otype;
 	uint64_t	c_base;
 	uint64_t	c_length;
-} __packed __aligned(CAPABILITY_SIZE);
+} __packed __aligned(CHERICAP_SIZE);
 #ifdef _KERNEL
-CTASSERT(sizeof(struct chericap) == CAPABILITY_SIZE);
+CTASSERT(sizeof(struct chericap) == CHERICAP_SIZE);
 #endif
 
 /*
@@ -94,228 +100,192 @@
  */
 	struct chericap	cf_pcc;
 };
-CTASSERT(sizeof(struct cp2_frame) == (27 * CAPABILITY_SIZE));
+CTASSERT(sizeof(struct cp2_frame) == (27 * CHERICAP_SIZE));
 #endif
 
 /*
- * CP2 capability register manipulation macros.
+ * CHERI capability register manipulation macros.
  */
-#define CP2_CR_GET_BASE(crn, v) do { \
-	__asm__ __volatile__ ( \
-	    "cgetbase %0, $c%1; " : \
-	    "=r" (v) : "i" (crn)); \
+#define CHERI_CGETBASE(v, cb) do { \
+	__asm__ __volatile__ ("cgetbase %0, $c%1" : "=r" (v) : \
+	    "i" (cb)); \
+} while (0)
+
+#define CHERI_CGETLEN(v, cb) do { \
+	__asm__ __volatile__ ("cgetlen %0, $c%1" : "=r" (v) : \
+	    "i" (cb)); \
+} while (0)
+
+#define CHERI_CGETTAG(v, cb) do { \
+	__asm__ __volatile__ ("cgettag %0, $c%1" : "=r" (v) : \
+	    "i" (cb)); \
 } while (0)
 
-#define CP2_CR_GET_UPERMS(crn, v) do { \
-	__asm__ __volatile__ ( \
-	    "cgetperm %0, $c%1; " : \
-	    "=r" (v) : "i" (crn)); \
+#define CHERI_CGETUNSEALED(v, cb) do { \
+	__asm__ __volatile__ ("cgetunsealed %0, $c%1" : "=r" (v) : \
+	    "i" (cb)); \
 } while (0)
 
-#define CP2_CR_GET_OTYPE(crn, v) do { \
-	__asm__ __volatile__ ( \
-	    "cgettype %0, $c%1; " : \
-	    "=r" (v) : "i" (crn)); \
+#define CHERI_CGETPERM(v, cb) do { \
+	__asm__ __volatile__ ("cgetperm %0, $c%1" : "=r" (v) : \
+	    "i" (cb)); \
 } while (0)
 
-#define CP2_CR_GET_EADDR(crn, v) CP2_CR_GET_OTYPE(crn, v)
+#define CHERI_CGETTYPE(v, cb) do { \
+	__asm__ __volatile__ ("cgettype %0, $c%1" : "=r" (v) : \
+	    "i" (cb)); \
+} while (0)
 
-#define CP2_CR_GET_LENGTH(crn, v) do { \
-	__asm__ __volatile__ ( \
-	    "cgetleng %0, $c%1; " : \
-	    "=r" (v) : "i" (crn)); \
+#define CHERI_CGETCAUSE(v) do { \
+	__asm__ __volatile__ ("cgetcause %0" : "=r" (v)); \
 } while (0)
 
-#define CP2_CR_STORE(crn_from, crn_base, offset) do { \
-	__asm__ __volatile__ ( \
-	    "cscr $c%0, %1($c%2); " : \
-	    : "i" (crn_from), "r" (offset), "i" (crn_base)); \
+/*
+ * Routines that modify or replace values in capability registers in ways
+ * that don't affect memory access via the register.  These do not require
+ * memory clobbers.
+ */
+#define CHERI_CSETTYPE(cd, cb, v) do { \
+	__asm__ __volatile__ ("csettype $c%0, $c%1, %2" : : \
+	    "i" (cd), "i" (cb), "r" (v)); \
 } while (0)
 
 /*
- * Routines that modify or replace the values in capability registers.  When
- * they act on CR0, we need to use a memory clobber so that cached values in
- * registers can be written back first, and cached values re-loaded after the
- * switch, since effectively we may be changing address space.  We do this
- * even for permissions modifications and length changes to ensure that a
- * writeback disallowed by the update will proceed first.
+ * Capability store; while this doesn't muck with c0, it does require a memory
+ * clobber.
  *
- * XXXRW: We don't really need to do this for CP2_CR_SET_OTYPE()?
- *
- * XXXRW: C macros are named after capability field names -- hence OTYPE
- * rather than TYPE.  Possibly this is a bug.
+ * XXXRW: The assembler does not yet support base+offset, just base, so assert
+ * that offset (for now) is not permitted.
+ */
+#ifdef _KERNEL
+#define CHERI_CSC(cs, cb, regbase, offset) do { \
+	KASSERT((offset) == 0, \
+	    ("CHERI_CSC: non-zero offset not supported")); \
+	__asm__ __volatile__ ("csc $c%0, %1($c%2)" : : \
+	    "i" (cs), "r" (regbase), "i" (cb) : "memory"); \
+} while (0)
+#else
+#define CHERI_CSC(cs, cb, regbase, offset) do { \
+	assert((offset) == 0); \
+	__asm__ __volatile__ ("csc $c%0, %1($c%2)" : : \
+	    "i" (cs), "r" (regbase), "i" (cb) : "memory"); \
+} while (0)
+#endif
+
+/*
+ * Routines that modify or replace values in capability registers and that,
+ * if used on C0, require the compiler to write registers back to memory and
+ * reload them afterwards, since we may effectively be changing the
+ * compiler-visible address space.
+ * This is also necessary for permissions changes, to ensure that write-back
+ * occurs before a possible loss of store permission.
  */
-#define CP2_CR_MOVE(crn_to, crn_from) do { \
-	if ((crn_to) == 0) \
-		__asm__ __volatile__ ("cmove $c%0, $c%1" : \
-		    : "i" (crn_to), "i" (crn_from) : "memory"); \
+#define CHERI_CGETPCC(v, cd) do { \
+	if ((cd) == 0) \
+		__asm__ __volatile__ ("cgetpcc %0, $c%1" : "=r" (v) : \
+		    "i" (cd) : "memory"); \
+	else \
+		__asm__ __volatile__ ("cgetpcc %0, $c%1" : "=r" (v) : \
+		    "i" (cd)); \
+} while (0)
+
+#define CHERI_CINCBASE(cd, cb, v) do { \
+	if ((cd) == 0) \
+		__asm__ __volatile__ ("cincbase $c%0, $c%1, %2" : : \
+		    "i" (cd), "i" (cb), "r" (v) : "memory"); \
 	else \
-		__asm__ __volatile__ ("cmove $c%0, $c%1" : \
-		    : "i" (crn_to), "i" (crn_from)); \
+		__asm__ __volatile__ ("cincbase $c%0, $c%1, %2" : : \
+		    "i" (cd), "i" (cb), "r" (v)); \
 } while (0)
 
-#define CP2_CR_INC_BASE(crn_to, crn_from, v) do { \
-	if ((crn_to) == 0) \
-		__asm__ __volatile__ ( \
-		    "cincbase $c%0, $c%1, %2; " : \
-		    : "i" (crn_to), "i" (crn_from), "r" (v) : \
-		    "memory"); \
+#define CHERI_CMOVE(cd, cb) do { \
+	if ((cd) == 0) \
+		__asm__ __volatile__ ("cmove $c%0, $c%1" : : \
+		    "i" (cd), "i" (cb) : "memory"); \
 	else \
-		__asm__ __volatile__ ( \
-		    "cincbase $c%0, $c%1, %2; " : \
-		    : "i" (crn_to), "i" (crn_from), "r" (v)); \
+		__asm__ __volatile__ ("cmove $c%0, $c%1" : : \
+		    "i" (cd), "i" (cb)); \
 } while (0)
 
-#define CP2_CR_AND_UPERMS(crn_to, crn_from, v) do { \
-	if ((crn_to) == 0) \
-		__asm__ __volatile__ ( \
-		    "candperm $c%0, $c%1, %2; " : \
-		    : "i" (crn_to), "i" (crn_from), "r" (v) : \
-		    "memory"); \
+#define CHERI_CSETLEN(cd, cb, v) do { \
+	if ((cd) == 0) \
+		__asm__ __volatile__ ("csetlen $c%0, $c%1, %2" : : \
+		    "i" (cd), "i" (cb), "r" (v) : "memory"); \
 	else \
-		__asm__ __volatile__ ( \
-		    "candperm $c%0, $c%1, %2; " : \
-		    : "i" (crn_to), "i" (crn_from), "r" (v)); \
+		__asm__ __volatile__ ("csetlen $c%0, $c%1, %2" : : \
+		    "i" (cd), "i" (cb), "r" (v)); \
 } while (0)
 
-#define CP2_CR_SET_OTYPE(crn_to, crn_from, v) do { \
-	if ((crn_to) == 0) \
-		__asm__ __volatile__ ( \
-		    "csettype $c%0, $c%1, %2; " : \
-		    : "i" (crn_to), "i" (crn_from), "r" (v) : \
+#define CHERI_CCLEARTAG(cd) do { \
+	if ((cd) == 0) \
+		__asm__ __volatile__ ("ccleartag $c%0" : : "i" (cd) : \
 		    "memory"); \
 	else \
-		__asm__ __volatile__ ( \
-		    "csettype $c%0, $c%1, %2; " : \
-		    : "i" (crn_to), "i" (crn_from), "r" (v)); \
+		__asm__ __volatile__ ("ccleartag $c%0" : : "i" (cd)); \
 } while (0)
 
-#define CP2_CR_SET_LENGTH(crn_to, crn_from, v) do { \
-	if ((crn_to) == 0) \
-		__asm__ __volatile__ ( \
-		    "csetlen $c%0, $c%1, %2; " : \
-		    : "i" (crn_to), "i" (crn_from), "r" (v) : \
-		    "memory"); \
+#define CHERI_CANDPERM(cd, cb, v) do { \
+	if ((cd) == 0) \
+		__asm__ __volatile__ ("candperm $c%0, $c%1, %2" : : \
+		    "i" (cd), "i" (cb), "r" (v) : "memory"); \
 	else \
-		__asm__ __volatile__ ( \
-		    "csetlen $c%0, $c%1, %2; " : \
-		    : "i" (crn_to), "i" (crn_from), "r" (v)); \
+		__asm__ __volatile__ ("candperm $c%0, $c%1, %2" : : \
+		    "i" (cd), "i" (cb), "r" (v)); \
 } while (0)
 
-#define CP2_CR_LOAD(crn_to, crn_base, offset) do { \
-	if ((crn_to) == 0) \
-		__asm__ __volatile__ ( \
-		    "clcr $c%0, %1($c%2); " : \
-		    : "i" (crn_to), "r" (offset), "i" (crn_base) : \
-		    "memory"); \
+/*
+ * XXXRW: The assembler does not yet support base+offset, just base, so assert
+ * that offset (for now) is not permitted.
+ */
+#ifdef _KERNEL
+#define CHERI_CLC(cd, cb, regbase, offset) do { \
+	KASSERT((offset) == 0, \
+	    ("CHERI_CLC: non-zero offset not supported")); \
+	if ((cd) == 0) \
+		__asm__ __volatile__ ("clc $c%0, %1($c%2)" : : \
+		    "i" (cd), "r" (regbase), "i" (cb) : "memory"); \
+	else \
+		__asm__ __volatile__ ("clc $c%0, %1($c%2)" : : \
+		    "i" (cd), "r" (regbase), "i" (cb)); \
+} while (0)
+#else
+#define CHERI_CLC(cd, cb, regbase, offset) do { \
+	assert((offset) == 0); \
+	if ((cd) == 0) \
+		__asm__ __volatile__ ("clc $c%0, %1($c%2)" : : \
+		    "i" (cd), "r" (regbase), "i" (cb) : "memory"); \
 	else \
-		__asm__ __volatile__ ( \
-		    "clcr $c%0, %1($c%2); " : \
-		    : "i" (crn_to), "r" (offset), "i" (crn_base)); \
+		__asm__ __volatile__ ("clc $c%0, %1($c%2)" : : \
+		    "i" (cd), "r" (regbase), "i" (cb)); \
 } while (0)
+#endif
 
 static inline void
 cp2_capability_load(u_int crn_to, struct chericap *cp)
 {
 
-	CP2_CR_LOAD(crn_to, CHERI_CR_KDC, cp);
+	CHERI_CLC(crn_to, CHERI_CR_KDC, cp, 0);
 }
 
 static inline void
 cp2_capability_store(u_int crn_from, struct chericap *cp)
 {
 
-	CP2_CR_STORE(crn_from, CHERI_CR_KDC, cp);
+	CHERI_CSC(crn_from, CHERI_CR_KDC, cp, 0);
 }
 
 /*
  * Extract a flattened but useful memory representation of a complete
  * capability register.
- *
- * XXXRW: We appear not to have an instruction to extract the unsealed bit.
- * It would be nice if this were returned by cp2_cr_get_uperms() as part of
- * the permission mask.  What are the implications of this for seal
- * operations?
  */
-#define CP2_CR_GET(crn, c) do { \
-	CP2_CR_GET_UPERMS((crn), (c).c_uperms); \
-	CP2_CR_GET_OTYPE((crn), (c).u.c_otype); \
-	CP2_CR_GET_BASE((crn), (c).c_base); \
-	CP2_CR_GET_LENGTH((crn), (c).c_length); \
-} while (0)
-
-#define CP2_CR_SET(crn_to, crn_from, c) do { \
-	/* XXXRW: How about the unsealed bit? */ \
-	CP2_CR_SET_OTYPE((crn_to), (crn_from), (c).u.c_otype); \
-	CP2_CR_INC_BASE((crn_to), (crn_from), (c).c_base); \
-	CP2_CR_SET_LENGTH((crn_to), (crn_from), (c).c_length); \
-	CP2_CR_AND_UPERMS((crn_to), (crn_from), (c).c_uperms); \
-} while (0)
-
-/*
- * Routines for general-purpose memory loads and stores via capabilities.
- *
- * XXXRW: We apply memory clobbers rather stringently -- less firm use might
- * be required (or possible).  One side effect of allowing the compiler
- * access to multiple capability-named address spaces is that it is not able
- * to reason about overlap...
- */
-#define CP2_LOAD_BYTE_VIA(crn, offset, b) do { \
-	__asm__ __volatile__ ( \
-	    "clbr %0, %1($c%2); " : \
-	    "=r" (b) : "r" (offset), "i" (crn) : "memory"); \
-} while (0)
-
-#define CP2_LOAD_HWORD_VIA(crn, offset, h) do { \
-	__asm__ __volatile__ ( \
-	    "clhr %0, %1($c%2); " : \
-	    "=r" (b) : "r" (offset), "i" (crn) : "memory"); \
-} while (0)
-
-#define CP2_LOAD_WORD_VIA(crn, offset, w) do { \
-	__asm__ __volatile__ ( \
-	    "clwr %0, %1($c%2); " : \
-	    "=r" (w) : "r" (offset), "i" (crn) : "memory"); \
-} while (0)
-
-#define CP2_LOAD_DWORD_VIA(crn, offset, d) do { \
-	__asm__ __volatile__ ( \
-	    "cldr %0, %1($c%2); " : \
-	    "=r" (d) : "r" (offset), "i" (crn) : "memory"); \
-} while (0)
-
-#define CP2_STORE_BYTE_VIA(crn, offset, b) do { \
-	__asm__ __volatile__ ( \
-	    "csbr %0, %1($c%2); " : \
-	    : "r" (b), "r" (offset), "i" (crn) : "memory"); \
-} while (0)
-
-#define CP2_STORE_HWORD_VIA(crn, offset, h) do { \
-	__asm__ __volatile__ ( \
-	    "cshr %0, %1($c%2); " : \
-	    : "r" (h), "r" (offset), "i" (crn) : "memory"); \
+#define CHERI_GETCAPREG(crn, c) do { \
+	CHERI_CGETPERM((c).c_perms, (crn)); \
+	CHERI_CGETUNSEALED((c).c_unsealed, (crn)); \
+	CHERI_CGETTYPE((c).c_otype, (crn)); \
+	CHERI_CGETBASE((c).c_base, (crn)); \
+	CHERI_CGETLEN((c).c_length, (crn)); \
 } while (0)
 
-#define CP2_STORE_WORD_VIA(crn, offset, w) do { \
-	__asm__ __volatile__ ( \
-	    "cswr %0, %1($c%2); " : \
-	    : "r" (w), "r" (offset), "i" (crn) : "memory"); \
-} while (0)
-
-#define CP2_STORE_DWORD_VIA(crn, offset, d) do { \
-	__asm__ __volatile__ ( \
-	    "csdr %0, %1($c%2); " : \
-	    : "r" (d), "r" (offset), "i" (crn) : "memory"); \
-} while (0)
-
-/*
- * Functions wrapping the above macros in order to allow run-time, rather
- * than compile-time, determination of which capability register to use.  The
- * need for this construction should go away with improved compiler support.
- */
-void cp2_store_hword_via(u_int crn, uint64_t offset, uint16_t h);
-void cp2_store_dword_via(u_int crn, uint64_t offset, uint64_t d);
-
 /*
  * APIs that act on C language representations of capabilities -- but not
  * capabilities themselves.
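
A note on the pattern in the new macros: each takes capability-register
numbers as compile-time constants (hence the "i" constraints), and attaches a
"memory" clobber only when the destination register is $c0, since rewriting
$c0 effectively changes the compiler-visible address space.  Below is a
minimal sketch of how a caller might compose these macros.  It assumes the
header installs as <machine/cheri.h>; CHERI_CR_SCRATCH and
cheri_restrict_scratch() are hypothetical names used for illustration only
and are not part of the change above.

    #include <machine/cheri.h>

    #define CHERI_CR_SCRATCH    4   /* Hypothetical scratch register. */

    /*
     * Derive a bounded, permission-reduced capability in a scratch register
     * from $c3.  Register numbers must be compile-time constants, so the
     * scratch register is named by a macro rather than a variable.  Because
     * the destination is not $c0, the macros expand to their no-clobber
     * variants.
     */
    static inline void
    cheri_restrict_scratch(uint64_t base, uint64_t length, uint64_t perms)
    {

            CHERI_CMOVE(CHERI_CR_SCRATCH, 3);
            CHERI_CINCBASE(CHERI_CR_SCRATCH, CHERI_CR_SCRATCH, base);
            CHERI_CSETLEN(CHERI_CR_SCRATCH, CHERI_CR_SCRATCH, length);
            CHERI_CANDPERM(CHERI_CR_SCRATCH, CHERI_CR_SCRATCH, perms);
    }

The ordering matters: narrowing the base before the length, and dropping
permissions last, keeps each intermediate value representable; a store via
the scratch register afterwards would go through CHERI_CSC, which always
carries a "memory" clobber.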