ares = 0;
-	}
-
-	if (encrypt) {
-		ossl_aes_gcm_encrypt_avx512(&ctx->aes_ks, ctx, &ctx->gcm.mres,
-		    in, len, out);
-	} else {
-		ossl_aes_gcm_decrypt_avx512(&ctx->aes_ks, ctx, &ctx->gcm.mres,
-		    in, len, out);
-	}
-
-	return 0;
-}
+#else
+#error "Unsupported architecture!"
+#endif
 
-static int
-gcm_encrypt_avx512(struct ossl_gcm_context *ctx, const unsigned char *in,
-    unsigned char *out, size_t len)
+static void
+gcm_init(struct ossl_gcm_context *ctx, const void *key, size_t keylen)
 {
-	return _gcm_encrypt_avx512(ctx, in, out, len, true);
-}
+	KASSERT(keylen == 128 || keylen == 192 || keylen == 256,
+	    ("%s: invalid key length %zu", __func__, keylen));
 
-static int
-gcm_decrypt_avx512(struct ossl_gcm_context *ctx, const unsigned char *in,
-    unsigned char *out, size_t len)
-{
-	return _gcm_encrypt_avx512(ctx, in, out, len, false);
+	memset(&ctx->gcm, 0, sizeof(ctx->gcm));
+	memset(&ctx->aes_ks, 0, sizeof(ctx->aes_ks));
+	AES_set_encrypt_key(key, keylen, &ctx->aes_ks);
+	ctx->ops->init(ctx, key, keylen);
 }
 
-static int
-gcm_finish_avx512(struct ossl_gcm_context *ctx, const unsigned char *tag,
-    size_t len)
+static void
+gcm_tag_op(struct ossl_gcm_context *ctx, unsigned char *tag, size_t len)
 {
-	unsigned int *res = &ctx->gcm.mres;
-
-	/* Finalize AAD processing */
-	if (ctx->gcm.ares > 0)
-		res = &ctx->gcm.ares;
-
-	ossl_aes_gcm_finalize_avx512(ctx, *res);
-
-	ctx->gcm.ares = ctx->gcm.mres = 0;
-
-	if (tag != NULL)
-		return timingsafe_bcmp(ctx->gcm.Xi.c, tag, len);
-	return 0;
+	(void)ctx->ops->finish(ctx, NULL, 0);
+	memcpy(tag, ctx->gcm.Xi.c, len);
 }
 
-static const struct ossl_aes_gcm_ops gcm_ops_avx512 = {
-	.init = gcm_init_avx512,
-	.setiv = gcm_setiv_avx512,
-	.aad = gcm_aad_avx512,
-	.encrypt = gcm_encrypt_avx512,
-	.decrypt = gcm_decrypt_avx512,
-	.finish = gcm_finish_avx512,
-	.tag = gcm_tag,
-};
-
-size_t aesni_gcm_encrypt(const unsigned char *in, unsigned char *out, size_t len,
-    const void *key, unsigned char ivec[16], uint64_t *Xi);
-size_t aesni_gcm_decrypt(const unsigned char *in, unsigned char *out, size_t len,
-    const void *key, unsigned char ivec[16], uint64_t *Xi);
-void aesni_encrypt(const unsigned char *in, unsigned char *out, void *ks);
-void aesni_ctr32_encrypt_blocks(const unsigned char *in, unsigned char *out,
-    size_t blocks, void *ks, const unsigned char *iv);
-
-void gcm_init_avx(__uint128_t Htable[16], uint64_t Xi[2]);
-void gcm_gmult_avx(uint64_t Xi[2], const __uint128_t Htable[16]);
-void gcm_ghash_avx(uint64_t Xi[2], const __uint128_t Htable[16], const void *in,
-    size_t len);
-
 static void
-gcm_init_aesni(struct ossl_gcm_context *ctx, const void *key, size_t keylen)
+gcm_init_op(struct ossl_gcm_context *ctx, const void *key, size_t keylen)
 {
-	aesni_encrypt(ctx->gcm.H.c, ctx->gcm.H.c, &ctx->aes_ks);
+	AES_encrypt(ctx->gcm.H.c, ctx->gcm.H.c, &ctx->aes_ks);
 
 #if BYTE_ORDER == LITTLE_ENDIAN
 	ctx->gcm.H.u[0] = bswap64(ctx->gcm.H.u[0]);
 	ctx->gcm.H.u[1] = bswap64(ctx->gcm.H.u[1]);
 #endif
 
-	gcm_init_avx(ctx->gcm.Htable, ctx->gcm.H.u);
+	GCM_init(ctx->gcm.Htable, ctx->gcm.H.u);
 }
 
 static void
-gcm_setiv_aesni(struct ossl_gcm_context *ctx, const unsigned char *iv,
+gcm_setiv_op(struct ossl_gcm_context *ctx, const unsigned char *iv,
     size_t len)
 {
 	uint32_t ctr;
@@ -269,7 +196,7 @@ gcm_setiv_aesni(struct ossl_gcm_context *ctx, const unsigned char *iv,
 	ctx->gcm.Xi.u[0] = 0;
 	ctx->gcm.Xi.u[1] = 0;
 
-	aesni_encrypt(ctx->gcm.Yi.c, ctx->gcm.EK0.c, &ctx->aes_ks);
+	AES_encrypt(ctx->gcm.Yi.c, ctx->gcm.EK0.c, &ctx->aes_ks);
 	ctr++;
 
 #if BYTE_ORDER == LITTLE_ENDIAN
@@ -280,7 +207,7 @@ gcm_setiv_aesni(struct ossl_gcm_context *ctx, const unsigned char *iv,
 }
 
 static int
-gcm_aad_aesni(struct ossl_gcm_context *ctx, const unsigned char *aad,
+gcm_aad_op(struct ossl_gcm_context *ctx, const unsigned char *aad,
     size_t len)
 {
 	size_t i;
@@ -303,14 +230,14 @@ gcm_aad_aesni(struct ossl_gcm_context *ctx, const unsigned char *aad,
 			n = (n + 1) % 16;
 		}
 		if (n == 0)
-			gcm_gmult_avx(ctx->gcm.Xi.u, ctx->gcm.Htable);
+			GCM_gmult(ctx->gcm.Xi.u, ctx->gcm.Htable);
 		else {
 			ctx->gcm.ares = n;
 			return 0;
 		}
 	}
 	if ((i = (len & (size_t)-AES_BLOCK_LEN))) {
-		gcm_ghash_avx(ctx->gcm.Xi.u, ctx->gcm.Htable, aad, i);
+		GCM_ghash(ctx->gcm.Xi.u, ctx->gcm.Htable, aad, i);
 		aad += i;
 		len -= i;
 	}
@@ -341,7 +268,7 @@ gcm_encrypt(struct ossl_gcm_context *ctx, const unsigned char *in,
 
 	if (ctx->gcm.ares) {
 		/* First call to encrypt finalizes GHASH(AAD) */
-		gcm_gmult_avx(ctx->gcm.Xi.u, ctx->gcm.Htable);
+		GCM_gmult(ctx->gcm.Xi.u, ctx->gcm.Htable);
 		ctx->gcm.ares = 0;
 	}
 
@@ -354,7 +281,7 @@ gcm_encrypt(struct ossl_gcm_context *ctx, const unsigned char *in,
 	n = mres % 16;
 	for (i = 0; i < len; ++i) {
 		if (n == 0) {
-			aesni_encrypt(ctx->gcm.Yi.c, ctx->gcm.EKi.c,
+			AES_encrypt(ctx->gcm.Yi.c, ctx->gcm.EKi.c,
 			    &ctx->aes_ks);
 			++ctr;
 #if BYTE_ORDER == LITTLE_ENDIAN
@@ -366,7 +293,7 @@ gcm_encrypt(struct ossl_gcm_context *ctx, const unsigned char *in,
 		ctx->gcm.Xi.c[n] ^= out[i] = in[i] ^ ctx->gcm.EKi.c[n];
 		mres = n = (n + 1) % 16;
 		if (n == 0)
-			gcm_gmult_avx(ctx->gcm.Xi.u, ctx->gcm.Htable);
+			GCM_gmult(ctx->gcm.Xi.u, ctx->gcm.Htable);
 	}
 
 	ctx->gcm.mres = mres;
@@ -390,7 +317,7 @@ gcm_encrypt_ctr32(struct ossl_gcm_context *ctx, const unsigned char *in,
 
 	if (ctx->gcm.ares) {
 		/* First call to encrypt finalizes GHASH(AAD) */
-		gcm_gmult_avx(ctx->gcm.Xi.u, ctx->gcm.Htable);
+		GCM_gmult(ctx->gcm.Xi.u, ctx->gcm.Htable);
 		ctx->gcm.ares = 0;
 	}
 
@@ -408,7 +335,7 @@ gcm_encrypt_ctr32(struct ossl_gcm_context *ctx, const unsigned char *in,
 			n = (n + 1) % 16;
 		}
 		if (n == 0) {
-			gcm_gmult_avx(ctx->gcm.Xi.u, ctx->gcm.Htable);
+			GCM_gmult(ctx->gcm.Xi.u, ctx->gcm.Htable);
 			mres = 0;
 		} else {
 			ctx->gcm.mres = n;
@@ -418,7 +345,7 @@ gcm_encrypt_ctr32(struct ossl_gcm_context *ctx, const unsigned char *in,
 	if ((i = (len & (size_t)-16))) {
 		size_t j = i / 16;
 
-		aesni_ctr32_encrypt_blocks(in, out, j, &ctx->aes_ks, ctx->gcm.Yi.c);
+		AES_ctr32_encrypt_blocks(in, out, j, &ctx->aes_ks, ctx->gcm.Yi.c);
 		ctr += (unsigned int)j;
 #if BYTE_ORDER == LITTLE_ENDIAN
 		ctx->gcm.Yi.d[3] = bswap32(ctr);
@@ -430,12 +357,12 @@ gcm_encrypt_ctr32(struct ossl_gcm_context *ctx, const unsigned char *in,
 		while (j--) {
 			for (i = 0; i < 16; ++i)
 				ctx->gcm.Xi.c[i] ^= out[i];
-			gcm_gmult_avx(ctx->gcm.Xi.u, ctx->gcm.Htable);
+			GCM_gmult(ctx->gcm.Xi.u, ctx->gcm.Htable);
 			out += 16;
 		}
 	}
 	if (len) {
-		aesni_encrypt(ctx->gcm.Yi.c, ctx->gcm.EKi.c, &ctx->aes_ks);
+		AES_encrypt(ctx->gcm.Yi.c, ctx->gcm.EKi.c, &ctx->aes_ks);
 		++ctr;
 #if BYTE_ORDER == LITTLE_ENDIAN
 		ctx->gcm.Yi.d[3] = bswap32(ctr);
@@ -453,7 +380,7 @@ gcm_encrypt_ctr32(struct ossl_gcm_context *ctx, const unsigned char *in,
 }
 
 static int
-gcm_encrypt_aesni(struct ossl_gcm_context *ctx, const unsigned char *in,
+gcm_encrypt_op(struct ossl_gcm_context *ctx, const unsigned char *in,
     unsigned char *out, size_t len)
 {
 	size_t bulk = 0, res;
@@ -463,7 +390,7 @@ gcm_encrypt_aesni(struct ossl_gcm_context *ctx, const unsigned char *in,
 	if ((error = gcm_encrypt(ctx, in, out, res)) != 0)
 		return error;
 
-	bulk = aesni_gcm_encrypt(in + res, out + res, len - res,
+	bulk = AES_gcm_encrypt(in + res, out + res, len - res,
 	    &ctx->aes_ks, ctx->gcm.Yi.c, ctx->gcm.Xi.u);
 	ctx->gcm.len.u[1] += bulk;
 	bulk += res;
@@ -492,7 +419,7 @@ gcm_decrypt(struct ossl_gcm_context *ctx, const unsigned char *in,
 
 	if (ctx->gcm.ares) {
 		/* First call to encrypt finalizes GHASH(AAD) */
-		gcm_gmult_avx(ctx->gcm.Xi.u, ctx->gcm.Htable);
+		GCM_gmult(ctx->gcm.Xi.u, ctx->gcm.Htable);
 		ctx->gcm.ares = 0;
 	}
 
@@ -506,7 +433,7 @@ gcm_decrypt(struct ossl_gcm_context *ctx, const unsigned char *in,
 	for (i = 0; i < len; ++i) {
 		uint8_t c;
 		if (n == 0) {
-			aesni_encrypt(ctx->gcm.Yi.c, ctx->gcm.EKi.c,
+			AES_encrypt(ctx->gcm.Yi.c, ctx->gcm.EKi.c,
 			    &ctx->aes_ks);
 			++ctr;
 #if BYTE_ORDER == LITTLE_ENDIAN
@@ -520,7 +447,7 @@ gcm_decrypt(struct ossl_gcm_context *ctx, const unsigned char *in,
 		ctx->gcm.Xi.c[n] ^= c;
 		mres = n = (n + 1) % 16;
 		if (n == 0)
-			gcm_gmult_avx(ctx->gcm.Xi.u, ctx->gcm.Htable);
+			GCM_gmult(ctx->gcm.Xi.u, ctx->gcm.Htable);
 	}
 
 	ctx->gcm.mres = mres;
@@ -544,7 +471,7 @@ gcm_decrypt_ctr32(struct ossl_gcm_context *ctx, const unsigned char *in,
 
 	if (ctx->gcm.ares) {
 		/* First call to decrypt finalizes GHASH(AAD) */
-		gcm_gmult_avx(ctx->gcm.Xi.u, ctx->gcm.Htable);
+		GCM_gmult(ctx->gcm.Xi.u, ctx->gcm.Htable);
 		ctx->gcm.ares = 0;
 	}
 
@@ -564,7 +491,7 @@ gcm_decrypt_ctr32(struct ossl_gcm_context *ctx, const unsigned char *in,
 			n = (n + 1) % 16;
 		}
 		if (n == 0) {
-			gcm_gmult_avx(ctx->gcm.Xi.u, ctx->gcm.Htable);
+			GCM_gmult(ctx->gcm.Xi.u, ctx->gcm.Htable);
 			mres = 0;
 		} else {
 			ctx->gcm.mres = n;
@@ -578,12 +505,12 @@ gcm_decrypt_ctr32(struct ossl_gcm_context *ctx, const unsigned char *in,
 			size_t k;
 			for (k = 0; k < 16; ++k)
 				ctx->gcm.Xi.c[k] ^= in[k];
-			gcm_gmult_avx(ctx->gcm.Xi.u, ctx->gcm.Htable);
+			GCM_gmult(ctx->gcm.Xi.u, ctx->gcm.Htable);
 			in += 16;
 		}
 		j = i / 16;
 		in -= i;
-		aesni_ctr32_encrypt_blocks(in, out, j, &ctx->aes_ks, ctx->gcm.Yi.c);
+		AES_ctr32_encrypt_blocks(in, out, j, &ctx->aes_ks, ctx->gcm.Yi.c);
 		ctr += (unsigned int)j;
 #if BYTE_ORDER == LITTLE_ENDIAN
 		ctx->gcm.Yi.d[3] = bswap32(ctr);
@@ -595,7 +522,7 @@ gcm_decrypt_ctr32(struct ossl_gcm_context *ctx, const unsigned char *in,
 		len -= i;
 	}
 	if (len) {
-		aesni_encrypt(ctx->gcm.Yi.c, ctx->gcm.EKi.c, &ctx->aes_ks);
+		AES_encrypt(ctx->gcm.Yi.c, ctx->gcm.EKi.c, &ctx->aes_ks);
 		++ctr;
 #if BYTE_ORDER == LITTLE_ENDIAN
 		ctx->gcm.Yi.d[3] = bswap32(ctr);
@@ -615,7 +542,7 @@ gcm_decrypt_ctr32(struct ossl_gcm_context *ctx, const unsigned char *in,
 }
 
 static int
-gcm_decrypt_aesni(struct ossl_gcm_context *ctx, const unsigned char *in,
+gcm_decrypt_op(struct ossl_gcm_context *ctx, const unsigned char *in,
    unsigned char *out, size_t len)
 {
 	size_t bulk = 0, res;
@@ -625,7 +552,7 @@ gcm_decrypt_aesni(struct ossl_gcm_context *ctx, const unsigned char *in,
 	if ((error = gcm_decrypt(ctx, in, out, res)) != 0)
 		return error;
 
-	bulk = aesni_gcm_decrypt(in + res, out + res, len - res, &ctx->aes_ks,
+	bulk = AES_gcm_decrypt(in + res, out + res, len - res, &ctx->aes_ks,
 	    ctx->gcm.Yi.c, ctx->gcm.Xi.u);
 	ctx->gcm.len.u[1] += bulk;
 	bulk += res;
@@ -637,14 +564,14 @@ gcm_decrypt_aesni(struct ossl_gcm_context *ctx, const unsigned char *in,
 }
 
 static int
-gcm_finish_aesni(struct ossl_gcm_context *ctx, const unsigned char *tag,
+gcm_finish_op(struct ossl_gcm_context *ctx, const unsigned char *tag,
     size_t len)
 {
 	uint64_t alen = ctx->gcm.len.u[0] << 3;
 	uint64_t clen = ctx->gcm.len.u[1] << 3;
 
 	if (ctx->gcm.mres || ctx->gcm.ares)
-		gcm_gmult_avx(ctx->gcm.Xi.u, ctx->gcm.Htable);
+		GCM_gmult(ctx->gcm.Xi.u, ctx->gcm.Htable);
 
 #if BYTE_ORDER == LITTLE_ENDIAN
 	alen = bswap64(alen);
@@ -653,7 +580,7 @@ gcm_finish_aesni(struct ossl_gcm_context *ctx, const unsigned char *tag,
 
 	ctx->gcm.Xi.u[0] ^= alen;
 	ctx->gcm.Xi.u[1] ^= clen;
-	gcm_gmult_avx(ctx->gcm.Xi.u, ctx->gcm.Htable);
+	GCM_gmult(ctx->gcm.Xi.u, ctx->gcm.Htable);
 
 	ctx->gcm.Xi.u[0] ^= ctx->gcm.EK0.u[0];
 	ctx->gcm.Xi.u[1] ^= ctx->gcm.EK0.u[1];
@@ -663,40 +590,26 @@ gcm_finish_aesni(struct ossl_gcm_context *ctx, const unsigned char *tag,
 	return 0;
 }
 
-static const struct ossl_aes_gcm_ops gcm_ops_aesni = {
-	.init = gcm_init_aesni,
-	.setiv = gcm_setiv_aesni,
-	.aad = gcm_aad_aesni,
-	.encrypt = gcm_encrypt_aesni,
-	.decrypt = gcm_decrypt_aesni,
-	.finish = gcm_finish_aesni,
-	.tag = gcm_tag,
+static const struct ossl_aes_gcm_ops gcm_ops = {
+	.init = gcm_init_op,
+	.setiv = gcm_setiv_op,
+	.aad = gcm_aad_op,
+	.encrypt = gcm_encrypt_op,
+	.decrypt = gcm_decrypt_op,
+	.finish = gcm_finish_op,
+	.tag = gcm_tag_op,
 };
 
-int ossl_aes_gcm_setkey_aesni(const unsigned char *key, int klen, void *_ctx);
-
-int
-ossl_aes_gcm_setkey_aesni(const unsigned char *key, int klen,
-    void *_ctx)
-{
-	struct ossl_gcm_context *ctx;
-
-	ctx = _ctx;
-	ctx->ops = &gcm_ops_aesni;
-	gcm_init(ctx, key, klen);
-	return (0);
-}
-
-int ossl_aes_gcm_setkey_avx512(const unsigned char *key, int klen, void *_ctx);
+int ossl_aes_gcm_setkey(const unsigned char *key, int klen, void *_ctx);
 
 int
-ossl_aes_gcm_setkey_avx512(const unsigned char *key, int klen,
+ossl_aes_gcm_setkey(const unsigned char *key, int klen,
     void *_ctx)
 {
 	struct ossl_gcm_context *ctx;
 
 	ctx = _ctx;
-	ctx->ops = &gcm_ops_avx512;
+	ctx->ops = &gcm_ops;
 	gcm_init(ctx, key, klen);
 	return (0);
 }
diff --git a/sys/crypto/openssl/ossl_ppc.c b/sys/crypto/openssl/ossl_ppc.c
index 0951745c4b43..980211f46a76 100644

*** 91 LINES SKIPPED ***