Date: Wed, 6 Aug 2025 18:43:47 +0100
From: Jessica Clarke <jrtc27@freebsd.org>
To: Andrew Turner <andrew@freebsd.org>
Cc: "src-committers@freebsd.org" <src-committers@FreeBSD.org>,
    "dev-commits-src-all@freebsd.org" <dev-commits-src-all@FreeBSD.org>,
    "dev-commits-src-main@freebsd.org" <dev-commits-src-main@FreeBSD.org>
Subject: Re: git: 81f07332c03f - main - arm64: tidy up Top-Byte-Ignore (TBI) in the kernel
Message-ID: <BA3B207C-6D34-4748-AE4D-DCD5AFA27789@freebsd.org>
In-Reply-To: <202508061738.576Hc0wZ076618@gitrepo.freebsd.org>
References: <202508061738.576Hc0wZ076618@gitrepo.freebsd.org>
On 6 Aug 2025, at 18:38, Andrew Turner <andrew@freebsd.org> wrote:
>
> The branch main has been updated by andrew:
>
> URL: https://cgit.FreeBSD.org/src/commit/?id=81f07332c03fd2ac6efa8e15b1659a573d250329
>
> commit 81f07332c03fd2ac6efa8e15b1659a573d250329
> Author: Harry Moulton <harry.moulton@arm.com>
> AuthorDate: 2025-07-31 14:10:57 +0000
> Commit: Andrew Turner <andrew@FreeBSD.org>
> CommitDate: 2025-07-31 14:27:06 +0000
>
> arm64: tidy up Top-Byte-Ignore (TBI) in the kernel
>
> In preparation for TBI to be enabled for processes from 15.0 we need
> to clean up copying data between userspace and the kernel. These
> functions will check the address is within the valid userspace range,
> however as the userspace and kernel ranges may overlap when TBI is
> enabled we need to mask off the top 8 bits.
>
> Processes not using TBI are unaffected as the hardware will still
> check all bits in the address, however this will happen at the first
> load/store instruction.

I thought Linux’s ABI was that addresses passed to the kernel must be
canonical. Is that false, or is there a reason we’re diverging from that
design choice?

Jessica

> Reviewed by: andrew
> Sponsored by: Arm Ltd
> Differential Revision: https://reviews.freebsd.org/D49119
> ---
>  sys/arm64/arm64/copyinout.S | 18 ++++++++++++++++--
>  sys/arm64/arm64/support.S   |  9 ++++++++-
>  sys/arm64/include/vmparam.h |  3 +++
>  3 files changed, 27 insertions(+), 3 deletions(-)
>
> diff --git a/sys/arm64/arm64/copyinout.S b/sys/arm64/arm64/copyinout.S
> index 26dd0b4cf14f..e41c4b5f6734 100644
> --- a/sys/arm64/arm64/copyinout.S
> +++ b/sys/arm64/arm64/copyinout.S
> @@ -37,7 +37,14 @@
>  #include "assym.inc"
>
>  .macro check_user_access user_arg, size_arg, bad_access_func
> -	adds	x6, x\user_arg, x\size_arg
> +	/*
> +	 * TBI is enabled from 15.0. Clear the top byte of the userspace
> +	 * address before checking whether it's within the given limit.
> +	 * The later load/store instructions will fault if TBI is disabled
> +	 * for the current process.
> +	 */
> +	and	x6, x\user_arg, #(~TBI_ADDR_MASK)
> +	adds	x6, x6, x\size_arg
>  	b.cs	\bad_access_func
>  	ldr	x7, =VM_MAXUSER_ADDRESS
>  	cmp	x6, x7
> @@ -100,13 +107,20 @@ ENTRY(copyinstr)
>  	adr	x6, copyio_fault	/* Get the handler address */
>  	SET_FAULT_HANDLER(x6, x7)	/* Set the handler */
>
> +	/*
> +	 * As in check_user_access mask off the TBI bits for the cmp
> +	 * instruction. The load will fail trap if TBI is disabled, but we
> +	 * need to check the address didn't wrap.
> +	 */
> +	and	x6, x0, #(~TBI_ADDR_MASK)
>  	ldr	x7, =VM_MAXUSER_ADDRESS
> -1:	cmp	x0, x7
> +1:	cmp	x6, x7
>  	b.cs	copyio_fault
>  	ldtrb	w4, [x0]	/* Load from uaddr */
>  	add	x0, x0, #1	/* Next char */
>  	strb	w4, [x1], #1	/* Store in kaddr */
>  	add	x5, x5, #1	/* count++ */
> +	add	x6, x6, #1	/* Increment masked address */
>  	cbz	w4, 2f		/* Break when NUL-terminated */
>  	sub	x2, x2, #1	/* len-- */
>  	cbnz	x2, 1b
> diff --git a/sys/arm64/arm64/support.S b/sys/arm64/arm64/support.S
> index 2d067c7f7730..bf6fc931e4b0 100644
> --- a/sys/arm64/arm64/support.S
> +++ b/sys/arm64/arm64/support.S
> @@ -39,8 +39,15 @@
>  #include "assym.inc"
>
>  .macro check_user_access user_arg, limit, bad_addr_func
> +	/*
> +	 * TBI is enabled from 15.0. Clear the top byte of the userspace
> +	 * address before checking whether it's within the given limit.
> +	 * The later load/store instructions will fault if TBI is disabled
> +	 * for the current process.
> +	 */
> +	and	x6, x\user_arg, #(~TBI_ADDR_MASK)
>  	ldr	x7, =(\limit)
> -	cmp	x\user_arg, x7
> +	cmp	x6, x7
>  	b.cs	\bad_addr_func
>  .endm
>
> diff --git a/sys/arm64/include/vmparam.h b/sys/arm64/include/vmparam.h
> index db3af1881282..c30ca1b2bff4 100644
> --- a/sys/arm64/include/vmparam.h
> +++ b/sys/arm64/include/vmparam.h
> @@ -211,6 +211,9 @@
>  /* The address bits that hold a pointer authentication code */
>  #define	PAC_ADDR_MASK	(0xff7f000000000000UL)
>
> +/* The top-byte ignore address bits */
> +#define	TBI_ADDR_MASK	0xff00000000000000UL
> +
>  /* If true addr is in the kernel address space */
>  #define	ADDR_IS_KERNEL(addr)	(((addr) & (1ul << 55)) == (1ul << 55))
>  /* If true addr is in the user address space */
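For anyone reading along without the surrounding assembly, here is a minimal
C sketch of the check the updated check_user_access macro performs, assuming
the TBI_ADDR_MASK value from the quoted vmparam.h hunk. The VM_MAXUSER_ADDRESS
value, the helper name and the strictness of the final comparison are
illustrative assumptions, not the kernel's actual definitions.

/*
 * Minimal C model of the user-range check: mask the top byte, detect
 * wrap-around of start + size, then compare against the user VA limit.
 */
#include <stdbool.h>
#include <stdint.h>

#define	TBI_ADDR_MASK		0xff00000000000000UL	/* from the vmparam.h hunk above */
#define	VM_MAXUSER_ADDRESS	0x0001000000000000UL	/* assumed 48-bit user VA limit */

static inline bool
user_range_ok(uint64_t uaddr, uint64_t size)
{
	uint64_t start, end;

	/* Clear the top byte so a TBI-tagged address passes the range check. */
	start = uaddr & ~TBI_ADDR_MASK;

	/* "adds ...; b.cs": reject if start + size wraps around. */
	if (__builtin_add_overflow(start, size, &end))
		return (false);

	/* "cmp x6, x7": reject if the end falls outside the user range. */
	return (end <= VM_MAXUSER_ADDRESS);	/* exact strictness assumed */
}

The masking only affects the range check; the ldtr/sttr-style accesses still
use the original (possibly tagged) address, which is why an untagged-ABI
process still faults on the first load/store, as the commit message notes.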