Date:      Wed, 25 Oct 2023 17:30:21 GMT
From:      Ed Maste <emaste@FreeBSD.org>
To:        src-committers@FreeBSD.org, dev-commits-src-all@FreeBSD.org, dev-commits-src-main@FreeBSD.org
Subject:   git: 575878a53382 - main - OpenSSL: regenerate asm files for 3.0.12
Message-ID:  <202310251730.39PHULPn081883@gitrepo.freebsd.org>

The branch main has been updated by emaste:

URL: https://cgit.FreeBSD.org/src/commit/?id=575878a533823aa3e5bab715928d9cdffbc4dcbc

commit 575878a533823aa3e5bab715928d9cdffbc4dcbc
Author:     Ed Maste <emaste@FreeBSD.org>
AuthorDate: 2023-10-25 17:28:47 +0000
Commit:     Ed Maste <emaste@FreeBSD.org>
CommitDate: 2023-10-25 17:29:35 +0000

    OpenSSL: regenerate asm files for 3.0.12
    
    Fixes: ad991e4c142e ("OpenSSL: update to 3.0.12")
    Sponsored by:   The FreeBSD Foundation
---
 sys/crypto/openssl/aarch64/aesv8-armx.S     |  6 +++---
 sys/crypto/openssl/aarch64/arm_arch.h       |  7 +------
 sys/crypto/openssl/aarch64/ghashv8-armx.S   | 26 +++++++++++++-------------
 sys/crypto/openssl/aarch64/poly1305-armv8.S | 24 ++++++++++++------------
 4 files changed, 29 insertions(+), 34 deletions(-)

diff --git a/sys/crypto/openssl/aarch64/aesv8-armx.S b/sys/crypto/openssl/aarch64/aesv8-armx.S
index 6c6dd095fb84..015c2eea6dbb 100644
--- a/sys/crypto/openssl/aarch64/aesv8-armx.S
+++ b/sys/crypto/openssl/aarch64/aesv8-armx.S
@@ -106,7 +106,7 @@ aes_v8_set_encrypt_key:
 .Loop192:
 	tbl	v6.16b,{v4.16b},v2.16b
 	ext	v5.16b,v0.16b,v3.16b,#12
-#ifdef __ARMEB__
+#ifdef __AARCH64EB__
 	st1	{v4.4s},[x2],#16
 	sub	x2,x2,#8
 #else
@@ -1520,7 +1520,7 @@ aes_v8_ctr32_encrypt_blocks:
 	ldr	w5,[x3,#240]
 
 	ldr	w8, [x4, #12]
-#ifdef __ARMEB__
+#ifdef __AARCH64EB__
 	ld1	{v0.16b},[x4]
 #else
 	ld1	{v0.4s},[x4]
@@ -1537,7 +1537,7 @@ aes_v8_ctr32_encrypt_blocks:
 	add	x7,x3,#32
 	mov	w6,w5
 	csel	x12,xzr,x12,lo
-#ifndef __ARMEB__
+#ifndef __AARCH64EB__
 	rev	w8, w8
 #endif
 	orr	v1.16b,v0.16b,v0.16b
diff --git a/sys/crypto/openssl/aarch64/arm_arch.h b/sys/crypto/openssl/aarch64/arm_arch.h
index a815a5c72b7f..7bedb385d971 100644
--- a/sys/crypto/openssl/aarch64/arm_arch.h
+++ b/sys/crypto/openssl/aarch64/arm_arch.h
@@ -1,5 +1,5 @@
 /*
- * Copyright 2011-2022 The OpenSSL Project Authors. All Rights Reserved.
+ * Copyright 2011-2023 The OpenSSL Project Authors. All Rights Reserved.
  *
  * Licensed under the Apache License 2.0 (the "License").  You may not use
  * this file except in compliance with the License.  You can obtain a copy
@@ -21,11 +21,6 @@
 #  elif defined(__GNUC__)
 #   if   defined(__aarch64__)
 #    define __ARM_ARCH__ 8
-#    if __BYTE_ORDER__==__ORDER_BIG_ENDIAN__
-#     define __ARMEB__
-#    else
-#     define __ARMEL__
-#    endif
   /*
    * Why doesn't gcc define __ARM_ARCH__? Instead it defines
    * bunch of below macros. See all_architectures[] table in
diff --git a/sys/crypto/openssl/aarch64/ghashv8-armx.S b/sys/crypto/openssl/aarch64/ghashv8-armx.S
index 9dec85cd4679..42f053d664ef 100644
--- a/sys/crypto/openssl/aarch64/ghashv8-armx.S
+++ b/sys/crypto/openssl/aarch64/ghashv8-armx.S
@@ -104,7 +104,7 @@ gcm_gmult_v8:
 	movi	v19.16b,#0xe1
 	ld1	{v20.2d,v21.2d},[x1]	//load twisted H, ...
 	shl	v19.2d,v19.2d,#57
-#ifndef __ARMEB__
+#ifndef __AARCH64EB__
 	rev64	v17.16b,v17.16b
 #endif
 	ext	v3.16b,v17.16b,v17.16b,#8
@@ -129,7 +129,7 @@ gcm_gmult_v8:
 	eor	v18.16b,v18.16b,v2.16b
 	eor	v0.16b,v0.16b,v18.16b
 
-#ifndef __ARMEB__
+#ifndef __AARCH64EB__
 	rev64	v0.16b,v0.16b
 #endif
 	ext	v0.16b,v0.16b,v0.16b,#8
@@ -167,14 +167,14 @@ gcm_ghash_v8:
 	ext	v0.16b,v0.16b,v0.16b,#8		//rotate Xi
 	ld1	{v16.2d},[x2],#16	//load [rotated] I[0]
 	shl	v19.2d,v19.2d,#57		//compose 0xc2.0 constant
-#ifndef __ARMEB__
+#ifndef __AARCH64EB__
 	rev64	v16.16b,v16.16b
 	rev64	v0.16b,v0.16b
 #endif
 	ext	v3.16b,v16.16b,v16.16b,#8		//rotate I[0]
 	b.lo	.Lodd_tail_v8		//x3 was less than 32
 	ld1	{v17.2d},[x2],x12	//load [rotated] I[1]
-#ifndef __ARMEB__
+#ifndef __AARCH64EB__
 	rev64	v17.16b,v17.16b
 #endif
 	ext	v7.16b,v17.16b,v17.16b,#8
@@ -206,13 +206,13 @@ gcm_ghash_v8:
 	eor	v18.16b,v0.16b,v2.16b
 	eor	v1.16b,v1.16b,v17.16b
 	ld1	{v17.2d},[x2],x12	//load [rotated] I[i+3]
-#ifndef __ARMEB__
+#ifndef __AARCH64EB__
 	rev64	v16.16b,v16.16b
 #endif
 	eor	v1.16b,v1.16b,v18.16b
 	pmull	v18.1q,v0.1d,v19.1d		//1st phase of reduction
 
-#ifndef __ARMEB__
+#ifndef __AARCH64EB__
 	rev64	v17.16b,v17.16b
 #endif
 	ins	v2.d[0],v1.d[1]
@@ -262,7 +262,7 @@ gcm_ghash_v8:
 	eor	v0.16b,v0.16b,v18.16b
 
 .Ldone_v8:
-#ifndef __ARMEB__
+#ifndef __AARCH64EB__
 	rev64	v0.16b,v0.16b
 #endif
 	ext	v0.16b,v0.16b,v0.16b,#8
@@ -281,7 +281,7 @@ gcm_ghash_v8_4x:
 	shl	v19.2d,v19.2d,#57		//compose 0xc2.0 constant
 
 	ld1	{v4.2d,v5.2d,v6.2d,v7.2d},[x2],#64
-#ifndef __ARMEB__
+#ifndef __AARCH64EB__
 	rev64	v0.16b,v0.16b
 	rev64	v5.16b,v5.16b
 	rev64	v6.16b,v6.16b
@@ -325,7 +325,7 @@ gcm_ghash_v8_4x:
 	eor	v16.16b,v4.16b,v0.16b
 	ld1	{v4.2d,v5.2d,v6.2d,v7.2d},[x2],#64
 	ext	v3.16b,v16.16b,v16.16b,#8
-#ifndef __ARMEB__
+#ifndef __AARCH64EB__
 	rev64	v5.16b,v5.16b
 	rev64	v6.16b,v6.16b
 	rev64	v7.16b,v7.16b
@@ -408,7 +408,7 @@ gcm_ghash_v8_4x:
 	eor	v1.16b,v1.16b,v17.16b
 	ld1	{v4.2d,v5.2d,v6.2d},[x2]
 	eor	v1.16b,v1.16b,v18.16b
-#ifndef	__ARMEB__
+#ifndef	__AARCH64EB__
 	rev64	v5.16b,v5.16b
 	rev64	v6.16b,v6.16b
 	rev64	v4.16b,v4.16b
@@ -460,7 +460,7 @@ gcm_ghash_v8_4x:
 	eor	v1.16b,v1.16b,v17.16b
 	ld1	{v4.2d,v5.2d},[x2]
 	eor	v1.16b,v1.16b,v18.16b
-#ifndef	__ARMEB__
+#ifndef	__AARCH64EB__
 	rev64	v5.16b,v5.16b
 	rev64	v4.16b,v4.16b
 #endif
@@ -503,7 +503,7 @@ gcm_ghash_v8_4x:
 	eor	v1.16b,v1.16b,v17.16b
 	ld1	{v4.2d},[x2]
 	eor	v1.16b,v1.16b,v18.16b
-#ifndef	__ARMEB__
+#ifndef	__AARCH64EB__
 	rev64	v4.16b,v4.16b
 #endif
 
@@ -543,7 +543,7 @@ gcm_ghash_v8_4x:
 	eor	v0.16b,v0.16b,v18.16b
 	ext	v0.16b,v0.16b,v0.16b,#8
 
-#ifndef __ARMEB__
+#ifndef __AARCH64EB__
 	rev64	v0.16b,v0.16b
 #endif
 	st1	{v0.2d},[x0]		//write out Xi
diff --git a/sys/crypto/openssl/aarch64/poly1305-armv8.S b/sys/crypto/openssl/aarch64/poly1305-armv8.S
index 35de669f7398..8925984c3ee0 100644
--- a/sys/crypto/openssl/aarch64/poly1305-armv8.S
+++ b/sys/crypto/openssl/aarch64/poly1305-armv8.S
@@ -30,7 +30,7 @@ poly1305_init:
 	ldp	x7,x8,[x1]		// load key
 	mov	x9,#0xfffffffc0fffffff
 	movk	x9,#0x0fff,lsl#48
-#ifdef	__ARMEB__
+#ifdef	__AARCH64EB__
 	rev	x7,x7			// flip bytes
 	rev	x8,x8
 #endif
@@ -80,7 +80,7 @@ poly1305_blocks:
 .Loop:
 	ldp	x10,x11,[x1],#16	// load input
 	sub	x2,x2,#16
-#ifdef	__ARMEB__
+#ifdef	__AARCH64EB__
 	rev	x10,x10
 	rev	x11,x11
 #endif
@@ -148,13 +148,13 @@ poly1305_emit:
 	csel	x4,x4,x12,eq
 	csel	x5,x5,x13,eq
 
-#ifdef	__ARMEB__
+#ifdef	__AARCH64EB__
 	ror	x10,x10,#32		// flip nonce words
 	ror	x11,x11,#32
 #endif
 	adds	x4,x4,x10		// accumulate nonce
 	adc	x5,x5,x11
-#ifdef	__ARMEB__
+#ifdef	__AARCH64EB__
 	rev	x4,x4			// flip output bytes
 	rev	x5,x5
 #endif
@@ -277,7 +277,7 @@ poly1305_blocks_neon:
 	adcs	x5,x5,xzr
 	adc	x6,x6,xzr
 
-#ifdef	__ARMEB__
+#ifdef	__AARCH64EB__
 	rev	x12,x12
 	rev	x13,x13
 #endif
@@ -323,7 +323,7 @@ poly1305_blocks_neon:
 	ldp	x12,x13,[x1],#16	// load input
 	sub	x2,x2,#16
 	add	x9,x8,x8,lsr#2	// s1 = r1 + (r1 >> 2)
-#ifdef	__ARMEB__
+#ifdef	__AARCH64EB__
 	rev	x12,x12
 	rev	x13,x13
 #endif
@@ -408,7 +408,7 @@ poly1305_blocks_neon:
 	lsl	x3,x3,#24
 	add	x15,x0,#48
 
-#ifdef	__ARMEB__
+#ifdef	__AARCH64EB__
 	rev	x8,x8
 	rev	x12,x12
 	rev	x9,x9
@@ -444,7 +444,7 @@ poly1305_blocks_neon:
 	ld1	{v4.4s,v5.4s,v6.4s,v7.4s},[x15],#64
 	ld1	{v8.4s},[x15]
 
-#ifdef	__ARMEB__
+#ifdef	__AARCH64EB__
 	rev	x8,x8
 	rev	x12,x12
 	rev	x9,x9
@@ -505,7 +505,7 @@ poly1305_blocks_neon:
 	umull	v20.2d,v14.2s,v1.s[2]
 	ldp	x9,x13,[x16],#48
 	umull	v19.2d,v14.2s,v0.s[2]
-#ifdef	__ARMEB__
+#ifdef	__AARCH64EB__
 	rev	x8,x8
 	rev	x12,x12
 	rev	x9,x9
@@ -570,7 +570,7 @@ poly1305_blocks_neon:
 	umlal	v23.2d,v11.2s,v3.s[0]
 	umlal	v20.2d,v11.2s,v8.s[0]
 	umlal	v21.2d,v11.2s,v0.s[0]
-#ifdef	__ARMEB__
+#ifdef	__AARCH64EB__
 	rev	x8,x8
 	rev	x12,x12
 	rev	x9,x9
@@ -854,13 +854,13 @@ poly1305_emit_neon:
 	csel	x4,x4,x12,eq
 	csel	x5,x5,x13,eq
 
-#ifdef	__ARMEB__
+#ifdef	__AARCH64EB__
 	ror	x10,x10,#32		// flip nonce words
 	ror	x11,x11,#32
 #endif
 	adds	x4,x4,x10		// accumulate nonce
 	adc	x5,x5,x11
-#ifdef	__ARMEB__
+#ifdef	__AARCH64EB__
 	rev	x4,x4			// flip output bytes
 	rev	x5,x5
 #endif
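
[Editorial note, not part of the commit: on AArch64 targets, GCC and Clang
predefine __AARCH64EB__ (big-endian) or __AARCH64EL__ (little-endian);
__ARMEB__ and __ARMEL__ are the 32-bit Arm equivalents and are not
predefined for aarch64. The regenerated files therefore test __AARCH64EB__
directly, and arm_arch.h no longer needs to synthesize __ARMEB__ from
__BYTE_ORDER__. A minimal C sketch showing which macro fires on a given
target; the file name and output strings are illustrative only:

/*
 * endian_check.c (hypothetical name): report which endianness macro the
 * compiler predefines for the current target.  Build normally, e.g.
 * "cc -o endian_check endian_check.c", and run on the target.
 */
#include <stdio.h>

int main(void)
{
#if defined(__AARCH64EB__)
	puts("aarch64, big-endian");
#elif defined(__AARCH64EL__)
	puts("aarch64, little-endian");
#elif defined(__ARMEB__)
	puts("32-bit Arm, big-endian");
#elif defined(__ARMEL__)
	puts("32-bit Arm, little-endian");
#else
	puts("not an Arm target");
#endif
	return 0;
}
]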


