// SPDX-License-Identifier: GPL-2.0-only
/* Glue code for SHA512 hashing optimized for sparc64 crypto opcodes.
 *
 * This is based largely upon crypto/sha512_generic.c
 *
 * Copyright (c) Jean-Luc Cooke <[email protected]>
 * Copyright (c) Andrew McDonald <[email protected]>
 * Copyright (c) 2003 Kyle McMartin <[email protected]>
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <crypto/internal/hash.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/types.h>
#include <crypto/sha2.h>

#include <asm/pstate.h>
#include <asm/elf.h>

#include "opcodes.h"

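/*
 * The block transform is implemented in assembly using the sparc64 SHA512
 * opcode.  It consumes 'rounds' consecutive 128-byte blocks from 'data' and
 * updates the eight 64-bit state words pointed to by 'digest' in place.
 */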
asmlinkage void sha512_sparc64_transform(u64 *digest, const char *data,
					 unsigned int rounds);

static int sha512_sparc64_init(struct shash_desc *desc)
{
	struct sha512_state *sctx = shash_desc_ctx(desc);
	sctx->state[0] = SHA512_H0;
	sctx->state[1] = SHA512_H1;
	sctx->state[2] = SHA512_H2;
	sctx->state[3] = SHA512_H3;
	sctx->state[4] = SHA512_H4;
	sctx->state[5] = SHA512_H5;
	sctx->state[6] = SHA512_H6;
	sctx->state[7] = SHA512_H7;
	sctx->count[0] = sctx->count[1] = 0;

	return 0;
}

static int sha384_sparc64_init(struct shash_desc *desc)
{
	struct sha512_state *sctx = shash_desc_ctx(desc);
	sctx->state[0] = SHA384_H0;
	sctx->state[1] = SHA384_H1;
	sctx->state[2] = SHA384_H2;
	sctx->state[3] = SHA384_H3;
	sctx->state[4] = SHA384_H4;
	sctx->state[5] = SHA384_H5;
	sctx->state[6] = SHA384_H6;
	sctx->state[7] = SHA384_H7;
	sctx->count[0] = sctx->count[1] = 0;

	return 0;
}

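/*
 * Core update helper: complete any partially filled block buffered in
 * sctx->buf and run it through the opcode transform, then hash as many
 * whole 128-byte blocks as possible directly from 'data', and finally
 * buffer whatever remains.  count[0]/count[1] form a 128-bit byte counter;
 * the carry into count[1] is detected by the unsigned overflow check.
 */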
static void __sha512_sparc64_update(struct sha512_state *sctx, const u8 *data,
				    unsigned int len, unsigned int partial)
{
	unsigned int done = 0;

	if ((sctx->count[0] += len) < len)
		sctx->count[1]++;
	if (partial) {
		done = SHA512_BLOCK_SIZE - partial;
		memcpy(sctx->buf + partial, data, done);
		sha512_sparc64_transform(sctx->state, sctx->buf, 1);
	}
	if (len - done >= SHA512_BLOCK_SIZE) {
		const unsigned int rounds = (len - done) / SHA512_BLOCK_SIZE;

		sha512_sparc64_transform(sctx->state, data + done, rounds);
		done += rounds * SHA512_BLOCK_SIZE;
	}

	memcpy(sctx->buf, data + done, len - done);
}

static int sha512_sparc64_update(struct shash_desc *desc, const u8 *data,
				 unsigned int len)
{
	struct sha512_state *sctx = shash_desc_ctx(desc);
	unsigned int partial = sctx->count[0] % SHA512_BLOCK_SIZE;

	/* Handle the fast case right here */
	if (partial + len < SHA512_BLOCK_SIZE) {
		if ((sctx->count[0] += len) < len)
			sctx->count[1]++;
		memcpy(sctx->buf + partial, data, len);
	} else
		__sha512_sparc64_update(sctx, data, len, partial);

	return 0;
}

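/*
 * Finalize: append the standard SHA-512 padding (a 0x80 byte followed by
 * zeroes up to 112 mod 128) and the 128-bit big-endian bit count, then
 * store the state words big-endian into the digest and wipe the context.
 */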
static int sha512_sparc64_final(struct shash_desc *desc, u8 *out)
{
	struct sha512_state *sctx = shash_desc_ctx(desc);
	unsigned int i, index, padlen;
	__be64 *dst = (__be64 *)out;
	__be64 bits[2];
	static const u8 padding[SHA512_BLOCK_SIZE] = { 0x80, };

	/* Save number of bits */
	bits[1] = cpu_to_be64(sctx->count[0] << 3);
	bits[0] = cpu_to_be64(sctx->count[1] << 3 | sctx->count[0] >> 61);

	/* Pad out to 112 mod 128 and append length */
	index = sctx->count[0] % SHA512_BLOCK_SIZE;
	padlen = (index < 112) ? (112 - index) : ((SHA512_BLOCK_SIZE+112) - index);

	/* We need to fill a whole block for __sha512_sparc64_update() */
	if (padlen <= 112) {
		if ((sctx->count[0] += padlen) < padlen)
			sctx->count[1]++;
		memcpy(sctx->buf + index, padding, padlen);
	} else {
		__sha512_sparc64_update(sctx, padding, padlen, index);
	}
	__sha512_sparc64_update(sctx, (const u8 *)&bits, sizeof(bits), 112);

	/* Store state in digest */
	for (i = 0; i < 8; i++)
		dst[i] = cpu_to_be64(sctx->state[i]);

	/* Wipe context */
	memset(sctx, 0, sizeof(*sctx));

	return 0;
}

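/*
 * SHA-384 shares the SHA-512 compression function and differs only in its
 * initial values and output length: generate the full 64-byte digest into
 * a stack buffer, copy out the first 48 bytes, and wipe the temporary.
 */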
static int sha384_sparc64_final(struct shash_desc *desc, u8 *hash)
{
	u8 D[64];

	sha512_sparc64_final(desc, D);

	memcpy(hash, D, 48);
	memzero_explicit(D, 64);

	return 0;
}

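/*
 * shash algorithm descriptors.  SPARC_CR_OPCODE_PRIORITY (from opcodes.h)
 * is set higher than the generic C implementations' priority so the crypto
 * API prefers these drivers whenever they are registered.
 */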
static struct shash_alg sha512 = {
	.digestsize	=	SHA512_DIGEST_SIZE,
	.init		=	sha512_sparc64_init,
	.update		=	sha512_sparc64_update,
	.final		=	sha512_sparc64_final,
	.descsize	=	sizeof(struct sha512_state),
	.base		=	{
		.cra_name	= "sha512",
		.cra_driver_name= "sha512-sparc64",
		.cra_priority	= SPARC_CR_OPCODE_PRIORITY,
		.cra_blocksize	= SHA512_BLOCK_SIZE,
		.cra_module	= THIS_MODULE,
	}
};

static struct shash_alg sha384 = {
	.digestsize	=	SHA384_DIGEST_SIZE,
	.init		=	sha384_sparc64_init,
	.update		=	sha512_sparc64_update,
	.final		=	sha384_sparc64_final,
	.descsize	=	sizeof(struct sha512_state),
	.base		=	{
		.cra_name	= "sha384",
		.cra_driver_name= "sha384-sparc64",
		.cra_priority	= SPARC_CR_OPCODE_PRIORITY,
		.cra_blocksize	= SHA384_BLOCK_SIZE,
		.cra_module	= THIS_MODULE,
	}
};

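/*
 * The SHA512 opcode is only usable when the CPU advertises the crypto
 * hwcap and the Crypto Features Register (%asr26) has its SHA512 bit set.
 */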
static bool __init sparc64_has_sha512_opcode(void)
{
	unsigned long cfr;

	if (!(sparc64_elf_hwcap & HWCAP_SPARC_CRYPTO))
		return false;

	__asm__ __volatile__("rd %%asr26, %0" : "=r" (cfr));
	if (!(cfr & CFR_SHA512))
		return false;

	return true;
}

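/*
 * Register both algorithms only when the opcode is available; if sha512
 * registration fails, unwind the already-registered sha384.
 */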
static int __init sha512_sparc64_mod_init(void)
{
	if (sparc64_has_sha512_opcode()) {
		int ret = crypto_register_shash(&sha384);
		if (ret < 0)
			return ret;

		ret = crypto_register_shash(&sha512);
		if (ret < 0) {
			crypto_unregister_shash(&sha384);
			return ret;
		}

		pr_info("Using sparc64 sha512 opcode optimized SHA-512/SHA-384 implementation\n");
		return 0;
	}
	pr_info("sparc64 sha512 opcode not available.\n");
	return -ENODEV;
}

static void __exit sha512_sparc64_mod_fini(void)
{
	crypto_unregister_shash(&sha384);
	crypto_unregister_shash(&sha512);
}

module_init(sha512_sparc64_mod_init);
module_exit(sha512_sparc64_mod_fini);

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("SHA-384 and SHA-512 Secure Hash Algorithm, sparc64 sha512 opcode accelerated");

MODULE_ALIAS_CRYPTO("sha384");
MODULE_ALIAS_CRYPTO("sha512");

#include "crop_devid.c"