/* Copyright (C) 2004-2006, Advanced Micro Devices, Inc.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/pci.h>
#include <linux/pci_ids.h>
#include <linux/crypto.h>
#include <linux/spinlock.h>
#include <crypto/algapi.h>
#include <crypto/aes.h>

#include <linux/io.h>
#include <linux/delay.h>

#include "geode-aes.h"

/* Static structures */

static void __iomem *_iobase;
static spinlock_t lock;

/* Write a 128-bit field (either a writable key or IV) */
static inline void
_writefield(u32 offset, void *value)
{
	int i;

	for (i = 0; i < 4; i++)
		iowrite32(((u32 *) value)[i], _iobase + offset + (i * 4));
}

/* Read a 128-bit field (either a writable key or IV) */
static inline void
_readfield(u32 offset, void *value)
{
	int i;

	for (i = 0; i < 4; i++)
		((u32 *) value)[i] = ioread32(_iobase + offset + (i * 4));
}

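/*
 * Program a single operation into the engine. The engine DMAs directly
 * from/to the physical addresses written to AES_SOURCEA_REG/AES_DSTA_REG,
 * so src and dst must lie in the kernel's direct mapping (virt_to_phys()
 * is applied to them). The operation is kicked off with AES_CTRL_START
 * and we busy-poll the interrupt register until the pending bit is set
 * or the timeout counter expires. Returns 0 on completion, nonzero on
 * timeout.
 */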
static int
do_crypt(void *src, void *dst, int len, u32 flags)
{
	u32 status;
	u32 counter = AES_OP_TIMEOUT;

	iowrite32(virt_to_phys(src), _iobase + AES_SOURCEA_REG);
	iowrite32(virt_to_phys(dst), _iobase + AES_DSTA_REG);
	iowrite32(len, _iobase + AES_LENA_REG);

	/* Start the operation */
	iowrite32(AES_CTRL_START | flags, _iobase + AES_CTRLA_REG);

	do {
		status = ioread32(_iobase + AES_INTR_REG);
		cpu_relax();
	} while (!(status & AES_INTRA_PENDING) && --counter);

	/* Clear the event */
	iowrite32((status & 0xFF) | AES_INTRA_PENDING, _iobase + AES_INTR_REG);
	return counter ? 0 : 1;
}

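/*
 * Run one engine operation described by @op. The engine is a single
 * global resource, so the register setup (IV, key, control flags) and
 * the operation itself are serialized under the driver-wide spinlock.
 * For CBC the IV is written before the operation and read back after it,
 * so chaining across successive calls works.
 */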
static unsigned int
geode_aes_crypt(struct geode_aes_op *op)
{
	u32 flags = 0;
	unsigned long iflags;
	int ret;

	if (op->len == 0)
		return 0;

	/* If the source and destination are the same, then
	 * we need to turn on the coherent flags; otherwise
	 * we don't need to worry
	 */

	flags |= (AES_CTRL_DCA | AES_CTRL_SCA);

	if (op->dir == AES_DIR_ENCRYPT)
		flags |= AES_CTRL_ENCRYPT;

	/* Start the critical section */

	spin_lock_irqsave(&lock, iflags);

	if (op->mode == AES_MODE_CBC) {
		flags |= AES_CTRL_CBC;
		_writefield(AES_WRITEIV0_REG, op->iv);
	}

	if (!(op->flags & AES_FLAGS_HIDDENKEY)) {
		flags |= AES_CTRL_WRKEY;
		_writefield(AES_WRITEKEY0_REG, op->key);
	}

	ret = do_crypt(op->src, op->dst, op->len, flags);
	BUG_ON(ret);

	if (op->mode == AES_MODE_CBC)
		_readfield(AES_WRITEIV0_REG, op->iv);

	spin_unlock_irqrestore(&lock, iflags);

	return op->len;
}


/* CRYPTO-API Functions */

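/*
 * The engine only handles 128-bit keys. 192- and 256-bit keys are still
 * legal for "aes", so they are handed to the software fallback; any
 * other length is rejected with -EINVAL.
 */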
static int geode_setkey_cip(struct crypto_tfm *tfm, const u8 *key,
		unsigned int len)
{
	struct geode_aes_op *op = crypto_tfm_ctx(tfm);
	unsigned int ret;

	op->keylen = len;

	if (len == AES_KEYSIZE_128) {
		memcpy(op->key, key, len);
		return 0;
	}

	if (len != AES_KEYSIZE_192 && len != AES_KEYSIZE_256) {
		/* not supported at all */
		tfm->crt_flags |= CRYPTO_TFM_RES_BAD_KEY_LEN;
		return -EINVAL;
	}

	/*
	 * The requested key size is not supported by HW, do a fallback
	 */
	op->fallback.cip->base.crt_flags &= ~CRYPTO_TFM_REQ_MASK;
	op->fallback.cip->base.crt_flags |= (tfm->crt_flags & CRYPTO_TFM_REQ_MASK);

	ret = crypto_cipher_setkey(op->fallback.cip, key, len);
	if (ret) {
		tfm->crt_flags &= ~CRYPTO_TFM_RES_MASK;
		tfm->crt_flags |= (op->fallback.cip->base.crt_flags & CRYPTO_TFM_RES_MASK);
	}
	return ret;
}

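/* Same policy as geode_setkey_cip, but for the blkcipher fallback. */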
static int geode_setkey_blk(struct crypto_tfm *tfm, const u8 *key,
		unsigned int len)
{
	struct geode_aes_op *op = crypto_tfm_ctx(tfm);
	unsigned int ret;

	op->keylen = len;

	if (len == AES_KEYSIZE_128) {
		memcpy(op->key, key, len);
		return 0;
	}

	if (len != AES_KEYSIZE_192 && len != AES_KEYSIZE_256) {
		/* not supported at all */
		tfm->crt_flags |= CRYPTO_TFM_RES_BAD_KEY_LEN;
		return -EINVAL;
	}

	/*
	 * The requested key size is not supported by HW, do a fallback
	 */
	op->fallback.blk->base.crt_flags &= ~CRYPTO_TFM_REQ_MASK;
	op->fallback.blk->base.crt_flags |= (tfm->crt_flags & CRYPTO_TFM_REQ_MASK);

	ret = crypto_blkcipher_setkey(op->fallback.blk, key, len);
	if (ret) {
		tfm->crt_flags &= ~CRYPTO_TFM_RES_MASK;
		tfm->crt_flags |= (op->fallback.blk->base.crt_flags & CRYPTO_TFM_RES_MASK);
	}
	return ret;
}

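/*
 * The fallback helpers temporarily swap desc->tfm to the software
 * blkcipher, run the request, and restore the original tfm before
 * returning, so callers never observe the substitution.
 */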
static int fallback_blk_dec(struct blkcipher_desc *desc,
		struct scatterlist *dst, struct scatterlist *src,
		unsigned int nbytes)
{
	unsigned int ret;
	struct crypto_blkcipher *tfm;
	struct geode_aes_op *op = crypto_blkcipher_ctx(desc->tfm);

	tfm = desc->tfm;
	desc->tfm = op->fallback.blk;

	ret = crypto_blkcipher_decrypt_iv(desc, dst, src, nbytes);

	desc->tfm = tfm;
	return ret;
}

static int fallback_blk_enc(struct blkcipher_desc *desc,
		struct scatterlist *dst, struct scatterlist *src,
		unsigned int nbytes)
{
	unsigned int ret;
	struct crypto_blkcipher *tfm;
	struct geode_aes_op *op = crypto_blkcipher_ctx(desc->tfm);

	tfm = desc->tfm;
	desc->tfm = op->fallback.blk;

	ret = crypto_blkcipher_encrypt_iv(desc, dst, src, nbytes);

	desc->tfm = tfm;
	return ret;
}

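/*
 * Single-block primitives for the plain "aes" cipher. Keys the hardware
 * cannot handle (anything but 128 bits) are routed to the software
 * fallback cipher.
 */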
static void
geode_encrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in)
{
	struct geode_aes_op *op = crypto_tfm_ctx(tfm);

	if (unlikely(op->keylen != AES_KEYSIZE_128)) {
		crypto_cipher_encrypt_one(op->fallback.cip, out, in);
		return;
	}

	op->src = (void *) in;
	op->dst = (void *) out;
	op->mode = AES_MODE_ECB;
	op->flags = 0;
	op->len = AES_BLOCK_SIZE;
	op->dir = AES_DIR_ENCRYPT;

	geode_aes_crypt(op);
}

static void
geode_decrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in)
{
	struct geode_aes_op *op = crypto_tfm_ctx(tfm);

	if (unlikely(op->keylen != AES_KEYSIZE_128)) {
		crypto_cipher_decrypt_one(op->fallback.cip, out, in);
		return;
	}

	op->src = (void *) in;
	op->dst = (void *) out;
	op->mode = AES_MODE_ECB;
	op->flags = 0;
	op->len = AES_BLOCK_SIZE;
	op->dir = AES_DIR_DECRYPT;

	geode_aes_crypt(op);
}

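/*
 * Allocate the software fallback at tfm init time. Passing
 * CRYPTO_ALG_NEED_FALLBACK in the mask guarantees we do not get this
 * driver back as its own fallback.
 */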
static int fallback_init_cip(struct crypto_tfm *tfm)
{
	const char *name = crypto_tfm_alg_name(tfm);
	struct geode_aes_op *op = crypto_tfm_ctx(tfm);

	op->fallback.cip = crypto_alloc_cipher(name, 0,
			CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK);

	if (IS_ERR(op->fallback.cip)) {
		printk(KERN_ERR "Error allocating fallback algo %s\n", name);
		return PTR_ERR(op->fallback.cip);
	}

	return 0;
}

static void fallback_exit_cip(struct crypto_tfm *tfm)
{
	struct geode_aes_op *op = crypto_tfm_ctx(tfm);

	crypto_free_cipher(op->fallback.cip);
	op->fallback.cip = NULL;
}

static struct crypto_alg geode_alg = {
	.cra_name = "aes",
	.cra_driver_name = "geode-aes",
	.cra_priority = 300,
	.cra_alignmask = 15,
	.cra_flags = CRYPTO_ALG_TYPE_CIPHER |
			CRYPTO_ALG_NEED_FALLBACK,
	.cra_init = fallback_init_cip,
	.cra_exit = fallback_exit_cip,
	.cra_blocksize = AES_BLOCK_SIZE,
	.cra_ctxsize = sizeof(struct geode_aes_op),
	.cra_module = THIS_MODULE,
	.cra_u = {
		.cipher = {
			.cia_min_keysize = AES_MIN_KEY_SIZE,
			.cia_max_keysize = AES_MAX_KEY_SIZE,
			.cia_setkey = geode_setkey_cip,
			.cia_encrypt = geode_encrypt,
			.cia_decrypt = geode_decrypt
		}
	}
};

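/*
 * CBC handlers: walk the scatterlists in virtually-mapped chunks and
 * feed whole AES blocks to the engine; any partial block is left for
 * blkcipher_walk_done() to carry into the next iteration.
 */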
static int
geode_cbc_decrypt(struct blkcipher_desc *desc,
		struct scatterlist *dst, struct scatterlist *src,
		unsigned int nbytes)
{
	struct geode_aes_op *op = crypto_blkcipher_ctx(desc->tfm);
	struct blkcipher_walk walk;
	int err, ret;

	if (unlikely(op->keylen != AES_KEYSIZE_128))
		return fallback_blk_dec(desc, dst, src, nbytes);

	blkcipher_walk_init(&walk, dst, src, nbytes);
	err = blkcipher_walk_virt(desc, &walk);
	op->iv = walk.iv;

	while ((nbytes = walk.nbytes)) {
		op->src = walk.src.virt.addr;
		op->dst = walk.dst.virt.addr;
		op->mode = AES_MODE_CBC;
		op->len = nbytes - (nbytes % AES_BLOCK_SIZE);
		op->dir = AES_DIR_DECRYPT;

		ret = geode_aes_crypt(op);

		nbytes -= ret;
		err = blkcipher_walk_done(desc, &walk, nbytes);
	}

	return err;
}

static int
geode_cbc_encrypt(struct blkcipher_desc *desc,
		struct scatterlist *dst, struct scatterlist *src,
		unsigned int nbytes)
{
	struct geode_aes_op *op = crypto_blkcipher_ctx(desc->tfm);
	struct blkcipher_walk walk;
	int err, ret;

	if (unlikely(op->keylen != AES_KEYSIZE_128))
		return fallback_blk_enc(desc, dst, src, nbytes);

	blkcipher_walk_init(&walk, dst, src, nbytes);
	err = blkcipher_walk_virt(desc, &walk);
	op->iv = walk.iv;

	while ((nbytes = walk.nbytes)) {
		op->src = walk.src.virt.addr;
		op->dst = walk.dst.virt.addr;
		op->mode = AES_MODE_CBC;
		op->len = nbytes - (nbytes % AES_BLOCK_SIZE);
		op->dir = AES_DIR_ENCRYPT;

		ret = geode_aes_crypt(op);
		nbytes -= ret;
		err = blkcipher_walk_done(desc, &walk, nbytes);
	}

	return err;
}

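/* Blkcipher flavour of the fallback allocation/teardown helpers. */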
static int fallback_init_blk(struct crypto_tfm *tfm)
{
	const char *name = crypto_tfm_alg_name(tfm);
	struct geode_aes_op *op = crypto_tfm_ctx(tfm);

	op->fallback.blk = crypto_alloc_blkcipher(name, 0,
			CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK);

	if (IS_ERR(op->fallback.blk)) {
		printk(KERN_ERR "Error allocating fallback algo %s\n", name);
		return PTR_ERR(op->fallback.blk);
	}

	return 0;
}

static void fallback_exit_blk(struct crypto_tfm *tfm)
{
	struct geode_aes_op *op = crypto_tfm_ctx(tfm);

	crypto_free_blkcipher(op->fallback.blk);
	op->fallback.blk = NULL;
}

static struct crypto_alg geode_cbc_alg = {
	.cra_name = "cbc(aes)",
	.cra_driver_name = "cbc-aes-geode",
	.cra_priority = 400,
	.cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER |
			CRYPTO_ALG_KERN_DRIVER_ONLY |
			CRYPTO_ALG_NEED_FALLBACK,
	.cra_init = fallback_init_blk,
	.cra_exit = fallback_exit_blk,
	.cra_blocksize = AES_BLOCK_SIZE,
	.cra_ctxsize = sizeof(struct geode_aes_op),
	.cra_alignmask = 15,
	.cra_type = &crypto_blkcipher_type,
	.cra_module = THIS_MODULE,
	.cra_u = {
		.blkcipher = {
			.min_keysize = AES_MIN_KEY_SIZE,
			.max_keysize = AES_MAX_KEY_SIZE,
			.setkey = geode_setkey_blk,
			.encrypt = geode_cbc_encrypt,
			.decrypt = geode_cbc_decrypt,
			.ivsize = AES_BLOCK_SIZE,
		}
	}
};

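/*
 * The ECB handlers mirror the CBC paths above, minus the IV handling.
 */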
static int
geode_ecb_decrypt(struct blkcipher_desc *desc,
		struct scatterlist *dst, struct scatterlist *src,
		unsigned int nbytes)
{
	struct geode_aes_op *op = crypto_blkcipher_ctx(desc->tfm);
	struct blkcipher_walk walk;
	int err, ret;

	if (unlikely(op->keylen != AES_KEYSIZE_128))
		return fallback_blk_dec(desc, dst, src, nbytes);

	blkcipher_walk_init(&walk, dst, src, nbytes);
	err = blkcipher_walk_virt(desc, &walk);

	while ((nbytes = walk.nbytes)) {
		op->src = walk.src.virt.addr;
		op->dst = walk.dst.virt.addr;
		op->mode = AES_MODE_ECB;
		op->len = nbytes - (nbytes % AES_BLOCK_SIZE);
		op->dir = AES_DIR_DECRYPT;

		ret = geode_aes_crypt(op);
		nbytes -= ret;
		err = blkcipher_walk_done(desc, &walk, nbytes);
	}

	return err;
}

static int
geode_ecb_encrypt(struct blkcipher_desc *desc,
		struct scatterlist *dst, struct scatterlist *src,
		unsigned int nbytes)
{
	struct geode_aes_op *op = crypto_blkcipher_ctx(desc->tfm);
	struct blkcipher_walk walk;
	int err, ret;

	if (unlikely(op->keylen != AES_KEYSIZE_128))
		return fallback_blk_enc(desc, dst, src, nbytes);

	blkcipher_walk_init(&walk, dst, src, nbytes);
	err = blkcipher_walk_virt(desc, &walk);

	while ((nbytes = walk.nbytes)) {
		op->src = walk.src.virt.addr;
		op->dst = walk.dst.virt.addr;
		op->mode = AES_MODE_ECB;
		op->len = nbytes - (nbytes % AES_BLOCK_SIZE);
		op->dir = AES_DIR_ENCRYPT;

		ret = geode_aes_crypt(op);
		nbytes -= ret;
		err = blkcipher_walk_done(desc, &walk, nbytes);
	}

	return err;
}

static struct crypto_alg geode_ecb_alg = {
	.cra_name = "ecb(aes)",
	.cra_driver_name = "ecb-aes-geode",
	.cra_priority = 400,
	.cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER |
			CRYPTO_ALG_KERN_DRIVER_ONLY |
			CRYPTO_ALG_NEED_FALLBACK,
	.cra_init = fallback_init_blk,
	.cra_exit = fallback_exit_blk,
	.cra_blocksize = AES_BLOCK_SIZE,
	.cra_ctxsize = sizeof(struct geode_aes_op),
	.cra_alignmask = 15,
	.cra_type = &crypto_blkcipher_type,
	.cra_module = THIS_MODULE,
	.cra_u = {
		.blkcipher = {
			.min_keysize = AES_MIN_KEY_SIZE,
			.max_keysize = AES_MAX_KEY_SIZE,
			.setkey = geode_setkey_blk,
			.encrypt = geode_ecb_encrypt,
			.decrypt = geode_ecb_decrypt,
		}
	}
};

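/*
 * For reference, a minimal (hypothetical) in-kernel user of the CBC alg
 * registered above would go through the generic blkcipher API; the
 * priority of 400 makes "cbc-aes-geode" preferred over the generic
 * software implementation whenever this hardware is present:
 *
 *	struct crypto_blkcipher *tfm = crypto_alloc_blkcipher("cbc(aes)", 0, 0);
 *	struct blkcipher_desc desc = { .tfm = tfm };
 *
 *	crypto_blkcipher_setkey(tfm, key, AES_KEYSIZE_128);
 *	crypto_blkcipher_set_iv(tfm, iv, AES_BLOCK_SIZE);
 *	crypto_blkcipher_encrypt(&desc, &sg_dst, &sg_src, nbytes);
 *	crypto_free_blkcipher(tfm);
 */
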
static void geode_aes_remove(struct pci_dev *dev)
{
	crypto_unregister_alg(&geode_alg);
	crypto_unregister_alg(&geode_ecb_alg);
	crypto_unregister_alg(&geode_cbc_alg);

	pci_iounmap(dev, _iobase);
	_iobase = NULL;

	pci_release_regions(dev);
	pci_disable_device(dev);
}

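/*
 * Bring the engine up: enable the PCI device, map BAR 0, clear any
 * pending interrupt state, then register the three algorithms. Each
 * failure point unwinds everything acquired or registered before it.
 */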
static int geode_aes_probe(struct pci_dev *dev, const struct pci_device_id *id)
{
	int ret;

	ret = pci_enable_device(dev);
	if (ret)
		return ret;

	ret = pci_request_regions(dev, "geode-aes");
	if (ret)
		goto eenable;

	_iobase = pci_iomap(dev, 0, 0);

	if (_iobase == NULL) {
		ret = -ENOMEM;
		goto erequest;
	}

	spin_lock_init(&lock);

	/* Clear any pending activity */
	iowrite32(AES_INTR_PENDING | AES_INTR_MASK, _iobase + AES_INTR_REG);

	ret = crypto_register_alg(&geode_alg);
	if (ret)
		goto eiomap;

	ret = crypto_register_alg(&geode_ecb_alg);
	if (ret)
		goto ealg;

	ret = crypto_register_alg(&geode_cbc_alg);
	if (ret)
		goto eecb;

	dev_notice(&dev->dev, "GEODE AES engine enabled.\n");
	return 0;

 eecb:
	crypto_unregister_alg(&geode_ecb_alg);

 ealg:
	crypto_unregister_alg(&geode_alg);

 eiomap:
	pci_iounmap(dev, _iobase);

 erequest:
	pci_release_regions(dev);

 eenable:
	pci_disable_device(dev);

	dev_err(&dev->dev, "GEODE AES initialization failed.\n");
	return ret;
}

static struct pci_device_id geode_aes_tbl[] = {
	{ PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_LX_AES), },
	{ 0, }
};

MODULE_DEVICE_TABLE(pci, geode_aes_tbl);

static struct pci_driver geode_aes_driver = {
	.name = "Geode LX AES",
	.id_table = geode_aes_tbl,
	.probe = geode_aes_probe,
	.remove = geode_aes_remove,
};

module_pci_driver(geode_aes_driver);

MODULE_AUTHOR("Advanced Micro Devices, Inc.");
MODULE_DESCRIPTION("Geode LX Hardware AES driver");
MODULE_LICENSE("GPL");