/* Copyright (C) 2004-2006, Advanced Micro Devices, Inc.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/pci.h>
#include <linux/pci_ids.h>
#include <linux/crypto.h>
#include <linux/spinlock.h>
#include <crypto/algapi.h>
#include <crypto/aes.h>

#include <linux/io.h>
#include <linux/delay.h>

#include "geode-aes.h"

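/*
 * This driver exposes the AES engine of the AMD Geode LX processor to the
 * kernel crypto API as three algorithms: "aes" (single block, driver name
 * "geode-aes"), "ecb(aes)" ("ecb-aes-geode") and "cbc(aes)"
 * ("cbc-aes-geode").  Only 128-bit keys are handled in hardware; larger
 * keys are transparently handed to a software fallback transform.
 *
 * Consumers never talk to the hardware directly; they simply ask the
 * crypto API for the algorithm and, priority permitting, land on this
 * driver.  A rough sketch using the blkcipher API of this era:
 *
 *	struct crypto_blkcipher *tfm =
 *		crypto_alloc_blkcipher("cbc(aes)", 0, CRYPTO_ALG_ASYNC);
 *	crypto_blkcipher_setkey(tfm, key, AES_KEYSIZE_128);
 */
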
/* Static structures */

static void __iomem *_iobase;
static spinlock_t lock;

/* Write a 128 bit field (either a writable key or IV) */
static inline void
_writefield(u32 offset, void *value)
{
	int i;
	for (i = 0; i < 4; i++)
		iowrite32(((u32 *) value)[i], _iobase + offset + (i * 4));
}

/* Read a 128 bit field (either a writable key or IV) */
static inline void
_readfield(u32 offset, void *value)
{
	int i;
	for (i = 0; i < 4; i++)
		((u32 *) value)[i] = ioread32(_iobase + offset + (i * 4));
}

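/*
 * Run one operation on the engine: program the physical addresses of the
 * source and destination buffers and the length, start the operation, then
 * poll the interrupt register until AES_INTRA_PENDING is raised or
 * AES_OP_TIMEOUT iterations pass.  Returns 0 on success, 1 on timeout.
 */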
static int
do_crypt(void *src, void *dst, int len, u32 flags)
{
	u32 status;
	u32 counter = AES_OP_TIMEOUT;

	iowrite32(virt_to_phys(src), _iobase + AES_SOURCEA_REG);
	iowrite32(virt_to_phys(dst), _iobase + AES_DSTA_REG);
	iowrite32(len, _iobase + AES_LENA_REG);

	/* Start the operation */
	iowrite32(AES_CTRL_START | flags, _iobase + AES_CTRLA_REG);

	do {
		status = ioread32(_iobase + AES_INTR_REG);
		cpu_relax();
	} while (!(status & AES_INTRA_PENDING) && --counter);

	/* Clear the event */
	iowrite32((status & 0xFF) | AES_INTRA_PENDING, _iobase + AES_INTR_REG);
	return counter ? 0 : 1;
}

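/*
 * Perform one hardware request described by @op: build the control flags,
 * load the IV (CBC only) and key registers under the global lock, hand the
 * buffers to do_crypt(), and for CBC read the updated IV back so chaining
 * continues across calls.  Returns the number of bytes processed (op->len).
 */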
static unsigned int
geode_aes_crypt(struct geode_aes_op *op)
{
	u32 flags = 0;
	unsigned long iflags;
	int ret;

	if (op->len == 0)
		return 0;

	/* If the source and destination are the same, then
	 * we need to turn on the coherent flags, otherwise
	 * we don't need to worry
	 */

	flags |= (AES_CTRL_DCA | AES_CTRL_SCA);

	if (op->dir == AES_DIR_ENCRYPT)
		flags |= AES_CTRL_ENCRYPT;

	/* Start the critical section */

	spin_lock_irqsave(&lock, iflags);

	if (op->mode == AES_MODE_CBC) {
		flags |= AES_CTRL_CBC;
		_writefield(AES_WRITEIV0_REG, op->iv);
	}

	if (!(op->flags & AES_FLAGS_HIDDENKEY)) {
		flags |= AES_CTRL_WRKEY;
		_writefield(AES_WRITEKEY0_REG, op->key);
	}

	ret = do_crypt(op->src, op->dst, op->len, flags);
	BUG_ON(ret);

	if (op->mode == AES_MODE_CBC)
		_readfield(AES_WRITEIV0_REG, op->iv);

	spin_unlock_irqrestore(&lock, iflags);

	return op->len;
}

/* CRYPTO-API Functions */

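/*
 * The engine only supports 128-bit keys.  setkey records the key length;
 * 192- and 256-bit keys are programmed into the software fallback transform
 * instead, and the encrypt/decrypt paths check op->keylen to decide which
 * implementation to use.  CRYPTO_TFM_REQ_* flags are propagated to the
 * fallback, and any CRYPTO_TFM_RES_* result flags are copied back on error.
 */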
static int geode_setkey_cip(struct crypto_tfm *tfm, const u8 *key,
		unsigned int len)
{
	struct geode_aes_op *op = crypto_tfm_ctx(tfm);
	unsigned int ret;

	op->keylen = len;

	if (len == AES_KEYSIZE_128) {
		memcpy(op->key, key, len);
		return 0;
	}

	if (len != AES_KEYSIZE_192 && len != AES_KEYSIZE_256) {
		/* not supported at all */
		tfm->crt_flags |= CRYPTO_TFM_RES_BAD_KEY_LEN;
		return -EINVAL;
	}

	/*
	 * The requested key size is not supported by HW, do a fallback
	 */
	op->fallback.cip->base.crt_flags &= ~CRYPTO_TFM_REQ_MASK;
	op->fallback.cip->base.crt_flags |= (tfm->crt_flags & CRYPTO_TFM_REQ_MASK);

	ret = crypto_cipher_setkey(op->fallback.cip, key, len);
	if (ret) {
		tfm->crt_flags &= ~CRYPTO_TFM_RES_MASK;
		tfm->crt_flags |= (op->fallback.cip->base.crt_flags & CRYPTO_TFM_RES_MASK);
	}
	return ret;
}

static int geode_setkey_blk(struct crypto_tfm *tfm, const u8 *key,
		unsigned int len)
{
	struct geode_aes_op *op = crypto_tfm_ctx(tfm);
	unsigned int ret;

	op->keylen = len;

	if (len == AES_KEYSIZE_128) {
		memcpy(op->key, key, len);
		return 0;
	}

	if (len != AES_KEYSIZE_192 && len != AES_KEYSIZE_256) {
		/* not supported at all */
		tfm->crt_flags |= CRYPTO_TFM_RES_BAD_KEY_LEN;
		return -EINVAL;
	}

	/*
	 * The requested key size is not supported by HW, do a fallback
	 */
	op->fallback.blk->base.crt_flags &= ~CRYPTO_TFM_REQ_MASK;
	op->fallback.blk->base.crt_flags |= (tfm->crt_flags & CRYPTO_TFM_REQ_MASK);

	ret = crypto_blkcipher_setkey(op->fallback.blk, key, len);
	if (ret) {
		tfm->crt_flags &= ~CRYPTO_TFM_RES_MASK;
		tfm->crt_flags |= (op->fallback.blk->base.crt_flags & CRYPTO_TFM_RES_MASK);
	}
	return ret;
}

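/*
 * Run a whole request on the software fallback blkcipher: desc->tfm is
 * temporarily pointed at the fallback transform for the duration of the
 * call and restored afterwards.
 */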
static int fallback_blk_dec(struct blkcipher_desc *desc,
		struct scatterlist *dst, struct scatterlist *src,
		unsigned int nbytes)
{
	unsigned int ret;
	struct crypto_blkcipher *tfm;
	struct geode_aes_op *op = crypto_blkcipher_ctx(desc->tfm);

	tfm = desc->tfm;
	desc->tfm = op->fallback.blk;

	ret = crypto_blkcipher_decrypt_iv(desc, dst, src, nbytes);

	desc->tfm = tfm;
	return ret;
}

static int fallback_blk_enc(struct blkcipher_desc *desc,
		struct scatterlist *dst, struct scatterlist *src,
		unsigned int nbytes)
{
	unsigned int ret;
	struct crypto_blkcipher *tfm;
	struct geode_aes_op *op = crypto_blkcipher_ctx(desc->tfm);

	tfm = desc->tfm;
	desc->tfm = op->fallback.blk;

	ret = crypto_blkcipher_encrypt_iv(desc, dst, src, nbytes);

	desc->tfm = tfm;
	return ret;
}

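/*
 * Single-block cipher operations.  With a 128-bit key one 16-byte block is
 * pushed through the engine in ECB mode; anything else goes to the
 * fallback cipher.
 */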
static void
geode_encrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in)
{
	struct geode_aes_op *op = crypto_tfm_ctx(tfm);

	if (unlikely(op->keylen != AES_KEYSIZE_128)) {
		crypto_cipher_encrypt_one(op->fallback.cip, out, in);
		return;
	}

	op->src = (void *) in;
	op->dst = (void *) out;
	op->mode = AES_MODE_ECB;
	op->flags = 0;
	op->len = AES_MIN_BLOCK_SIZE;
	op->dir = AES_DIR_ENCRYPT;

	geode_aes_crypt(op);
}

static void
geode_decrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in)
{
	struct geode_aes_op *op = crypto_tfm_ctx(tfm);

	if (unlikely(op->keylen != AES_KEYSIZE_128)) {
		crypto_cipher_decrypt_one(op->fallback.cip, out, in);
		return;
	}

	op->src = (void *) in;
	op->dst = (void *) out;
	op->mode = AES_MODE_ECB;
	op->flags = 0;
	op->len = AES_MIN_BLOCK_SIZE;
	op->dir = AES_DIR_DECRYPT;

	geode_aes_crypt(op);
}

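/*
 * Allocate the software fallback at tfm init time.  The mask
 * (CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK) asks for a synchronous
 * implementation that does not itself require a fallback, so this driver
 * cannot be selected recursively.
 */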
static int fallback_init_cip(struct crypto_tfm *tfm)
{
	const char *name = tfm->__crt_alg->cra_name;
	struct geode_aes_op *op = crypto_tfm_ctx(tfm);

	op->fallback.cip = crypto_alloc_cipher(name, 0,
				CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK);

	if (IS_ERR(op->fallback.cip)) {
		printk(KERN_ERR "Error allocating fallback algo %s\n", name);
		return PTR_ERR(op->fallback.cip);
	}

	return 0;
}

static void fallback_exit_cip(struct crypto_tfm *tfm)
{
	struct geode_aes_op *op = crypto_tfm_ctx(tfm);

	crypto_free_cipher(op->fallback.cip);
	op->fallback.cip = NULL;
}

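/*
 * Registered at priority 300/400 so the hardware implementations are
 * preferred over the generic software ones.  cra_alignmask = 15 makes the
 * crypto API hand us 16-byte aligned buffers; do_crypt() feeds the engine
 * physical addresses via virt_to_phys(), so the data has to be contiguous
 * and, presumably, aligned to the engine's requirements.
 */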
static struct crypto_alg geode_alg = {
	.cra_name		= "aes",
	.cra_driver_name	= "geode-aes",
	.cra_priority		= 300,
	.cra_alignmask		= 15,
	.cra_flags		= CRYPTO_ALG_TYPE_CIPHER |
				  CRYPTO_ALG_NEED_FALLBACK,
	.cra_init		= fallback_init_cip,
	.cra_exit		= fallback_exit_cip,
	.cra_blocksize		= AES_MIN_BLOCK_SIZE,
	.cra_ctxsize		= sizeof(struct geode_aes_op),
	.cra_module		= THIS_MODULE,
	.cra_u			= {
		.cipher = {
			.cia_min_keysize	= AES_MIN_KEY_SIZE,
			.cia_max_keysize	= AES_MAX_KEY_SIZE,
			.cia_setkey		= geode_setkey_cip,
			.cia_encrypt		= geode_encrypt,
			.cia_decrypt		= geode_decrypt
		}
	}
};

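/*
 * CBC handlers.  The scatterlists are walked with the blkcipher_walk API;
 * each mapped chunk is trimmed to a whole number of AES blocks, run through
 * the engine, and blkcipher_walk_done() is told how many bytes are left.
 * walk.iv is used directly as op->iv, so the engine's IV read-back keeps
 * the chain going between chunks.
 */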
static int
geode_cbc_decrypt(struct blkcipher_desc *desc,
		  struct scatterlist *dst, struct scatterlist *src,
		  unsigned int nbytes)
{
	struct geode_aes_op *op = crypto_blkcipher_ctx(desc->tfm);
	struct blkcipher_walk walk;
	int err, ret;

	if (unlikely(op->keylen != AES_KEYSIZE_128))
		return fallback_blk_dec(desc, dst, src, nbytes);

	blkcipher_walk_init(&walk, dst, src, nbytes);
	err = blkcipher_walk_virt(desc, &walk);
	op->iv = walk.iv;

	while ((nbytes = walk.nbytes)) {
		op->src = walk.src.virt.addr;
		op->dst = walk.dst.virt.addr;
		op->mode = AES_MODE_CBC;
		op->len = nbytes - (nbytes % AES_MIN_BLOCK_SIZE);
		op->dir = AES_DIR_DECRYPT;

		ret = geode_aes_crypt(op);

		nbytes -= ret;
		err = blkcipher_walk_done(desc, &walk, nbytes);
	}

	return err;
}

static int
geode_cbc_encrypt(struct blkcipher_desc *desc,
		  struct scatterlist *dst, struct scatterlist *src,
		  unsigned int nbytes)
{
	struct geode_aes_op *op = crypto_blkcipher_ctx(desc->tfm);
	struct blkcipher_walk walk;
	int err, ret;

	if (unlikely(op->keylen != AES_KEYSIZE_128))
		return fallback_blk_enc(desc, dst, src, nbytes);

	blkcipher_walk_init(&walk, dst, src, nbytes);
	err = blkcipher_walk_virt(desc, &walk);
	op->iv = walk.iv;

	while ((nbytes = walk.nbytes)) {
		op->src = walk.src.virt.addr;
		op->dst = walk.dst.virt.addr;
		op->mode = AES_MODE_CBC;
		op->len = nbytes - (nbytes % AES_MIN_BLOCK_SIZE);
		op->dir = AES_DIR_ENCRYPT;

		ret = geode_aes_crypt(op);
		nbytes -= ret;
		err = blkcipher_walk_done(desc, &walk, nbytes);
	}

	return err;
}

static int fallback_init_blk(struct crypto_tfm *tfm)
{
	const char *name = tfm->__crt_alg->cra_name;
	struct geode_aes_op *op = crypto_tfm_ctx(tfm);

	op->fallback.blk = crypto_alloc_blkcipher(name, 0,
			CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK);

	if (IS_ERR(op->fallback.blk)) {
		printk(KERN_ERR "Error allocating fallback algo %s\n", name);
		return PTR_ERR(op->fallback.blk);
	}

	return 0;
}

static void fallback_exit_blk(struct crypto_tfm *tfm)
{
	struct geode_aes_op *op = crypto_tfm_ctx(tfm);

	crypto_free_blkcipher(op->fallback.blk);
	op->fallback.blk = NULL;
}

static struct crypto_alg geode_cbc_alg = {
	.cra_name		= "cbc(aes)",
	.cra_driver_name	= "cbc-aes-geode",
	.cra_priority		= 400,
	.cra_flags		= CRYPTO_ALG_TYPE_BLKCIPHER |
				  CRYPTO_ALG_KERN_DRIVER_ONLY |
				  CRYPTO_ALG_NEED_FALLBACK,
	.cra_init		= fallback_init_blk,
	.cra_exit		= fallback_exit_blk,
	.cra_blocksize		= AES_MIN_BLOCK_SIZE,
	.cra_ctxsize		= sizeof(struct geode_aes_op),
	.cra_alignmask		= 15,
	.cra_type		= &crypto_blkcipher_type,
	.cra_module		= THIS_MODULE,
	.cra_u			= {
		.blkcipher = {
			.min_keysize	= AES_MIN_KEY_SIZE,
			.max_keysize	= AES_MAX_KEY_SIZE,
			.setkey		= geode_setkey_blk,
			.encrypt	= geode_cbc_encrypt,
			.decrypt	= geode_cbc_decrypt,
			.ivsize		= AES_IV_LENGTH,
		}
	}
};

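/*
 * ECB handlers: same walk structure as the CBC ones, minus the IV
 * handling.
 */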
static int
geode_ecb_decrypt(struct blkcipher_desc *desc,
		  struct scatterlist *dst, struct scatterlist *src,
		  unsigned int nbytes)
{
	struct geode_aes_op *op = crypto_blkcipher_ctx(desc->tfm);
	struct blkcipher_walk walk;
	int err, ret;

	if (unlikely(op->keylen != AES_KEYSIZE_128))
		return fallback_blk_dec(desc, dst, src, nbytes);

	blkcipher_walk_init(&walk, dst, src, nbytes);
	err = blkcipher_walk_virt(desc, &walk);

	while ((nbytes = walk.nbytes)) {
		op->src = walk.src.virt.addr;
		op->dst = walk.dst.virt.addr;
		op->mode = AES_MODE_ECB;
		op->len = nbytes - (nbytes % AES_MIN_BLOCK_SIZE);
		op->dir = AES_DIR_DECRYPT;

		ret = geode_aes_crypt(op);
		nbytes -= ret;
		err = blkcipher_walk_done(desc, &walk, nbytes);
	}

	return err;
}

static int
geode_ecb_encrypt(struct blkcipher_desc *desc,
		  struct scatterlist *dst, struct scatterlist *src,
		  unsigned int nbytes)
{
	struct geode_aes_op *op = crypto_blkcipher_ctx(desc->tfm);
	struct blkcipher_walk walk;
	int err, ret;

	if (unlikely(op->keylen != AES_KEYSIZE_128))
		return fallback_blk_enc(desc, dst, src, nbytes);

	blkcipher_walk_init(&walk, dst, src, nbytes);
	err = blkcipher_walk_virt(desc, &walk);

	while ((nbytes = walk.nbytes)) {
		op->src = walk.src.virt.addr;
		op->dst = walk.dst.virt.addr;
		op->mode = AES_MODE_ECB;
		op->len = nbytes - (nbytes % AES_MIN_BLOCK_SIZE);
		op->dir = AES_DIR_ENCRYPT;

		ret = geode_aes_crypt(op);
		nbytes -= ret;
		err = blkcipher_walk_done(desc, &walk, nbytes);
	}

	return err;
}

static struct crypto_alg geode_ecb_alg = {
	.cra_name		= "ecb(aes)",
	.cra_driver_name	= "ecb-aes-geode",
	.cra_priority		= 400,
	.cra_flags		= CRYPTO_ALG_TYPE_BLKCIPHER |
				  CRYPTO_ALG_KERN_DRIVER_ONLY |
				  CRYPTO_ALG_NEED_FALLBACK,
	.cra_init		= fallback_init_blk,
	.cra_exit		= fallback_exit_blk,
	.cra_blocksize		= AES_MIN_BLOCK_SIZE,
	.cra_ctxsize		= sizeof(struct geode_aes_op),
	.cra_alignmask		= 15,
	.cra_type		= &crypto_blkcipher_type,
	.cra_module		= THIS_MODULE,
	.cra_u			= {
		.blkcipher = {
			.min_keysize	= AES_MIN_KEY_SIZE,
			.max_keysize	= AES_MAX_KEY_SIZE,
			.setkey		= geode_setkey_blk,
			.encrypt	= geode_ecb_encrypt,
			.decrypt	= geode_ecb_decrypt,
		}
	}
};

static void geode_aes_remove(struct pci_dev *dev)
{
	crypto_unregister_alg(&geode_alg);
	crypto_unregister_alg(&geode_ecb_alg);
	crypto_unregister_alg(&geode_cbc_alg);

	pci_iounmap(dev, _iobase);
	_iobase = NULL;

	pci_release_regions(dev);
	pci_disable_device(dev);
}

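/*
 * Probe: enable the PCI function, claim and map BAR 0, clear any stale
 * interrupt state, then register the three algorithms, unwinding in
 * reverse order if anything fails.
 */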
static int geode_aes_probe(struct pci_dev *dev, const struct pci_device_id *id)
{
	int ret;

	ret = pci_enable_device(dev);
	if (ret)
		return ret;

	ret = pci_request_regions(dev, "geode-aes");
	if (ret)
		goto eenable;

	_iobase = pci_iomap(dev, 0, 0);

	if (_iobase == NULL) {
		ret = -ENOMEM;
		goto erequest;
	}

	spin_lock_init(&lock);

	/* Clear any pending activity */
	iowrite32(AES_INTR_PENDING | AES_INTR_MASK, _iobase + AES_INTR_REG);

	ret = crypto_register_alg(&geode_alg);
	if (ret)
		goto eiomap;

	ret = crypto_register_alg(&geode_ecb_alg);
	if (ret)
		goto ealg;

	ret = crypto_register_alg(&geode_cbc_alg);
	if (ret)
		goto eecb;

	printk(KERN_NOTICE "geode-aes: GEODE AES engine enabled.\n");
	return 0;

 eecb:
	crypto_unregister_alg(&geode_ecb_alg);

 ealg:
	crypto_unregister_alg(&geode_alg);

 eiomap:
	pci_iounmap(dev, _iobase);

 erequest:
	pci_release_regions(dev);

 eenable:
	pci_disable_device(dev);

	printk(KERN_ERR "geode-aes: GEODE AES initialization failed.\n");
	return ret;
}

static struct pci_device_id geode_aes_tbl[] = {
	{ PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_LX_AES), },
	{ 0, }
};

MODULE_DEVICE_TABLE(pci, geode_aes_tbl);

static struct pci_driver geode_aes_driver = {
	.name = "Geode LX AES",
	.id_table = geode_aes_tbl,
	.probe = geode_aes_probe,
	.remove = geode_aes_remove,
};

module_pci_driver(geode_aes_driver);

MODULE_AUTHOR("Advanced Micro Devices, Inc.");
MODULE_DESCRIPTION("Geode LX Hardware AES driver");
MODULE_LICENSE("GPL");