/*
 * Added support for the AMD Geode LX RNG
 * (c) Copyright 2004-2005 Advanced Micro Devices, Inc.
 *
 * Hardware driver for the Intel/AMD/VIA Random Number Generators (RNG)
 *
 * Hardware driver for the AMD 768 Random Number Generator (RNG)
 *
 * Hardware driver for Intel i810 Random Number Generator (RNG)
 *
 * Copyright 2005 (c) MontaVista Software, Inc.
 *
 * Please read Documentation/hw_random.txt for details on use.
 *
 * ----------------------------------------------------------
 * This software may be used and distributed according to the terms
 * of the GNU General Public License, incorporated herein by reference.
 */
#include <linux/device.h>
#include <linux/hw_random.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/fs.h>
#include <linux/sched.h>
#include <linux/miscdevice.h>
#include <linux/kthread.h>
#include <linux/delay.h>
#include <linux/slab.h>
#include <linux/random.h>
#include <linux/err.h>
#include <asm/uaccess.h>
#define RNG_MODULE_NAME		"hw_random"
#define PFX			RNG_MODULE_NAME ": "
#define RNG_MISCDEV_MINOR	183 /* official */
static struct hwrng *current_rng;
static struct task_struct *hwrng_fill;
static LIST_HEAD(rng_list);
/* Protects rng_list and current_rng */
static DEFINE_MUTEX(rng_mutex);
/* Protects rng read functions, data_avail, rng_buffer and rng_fillbuf */
static DEFINE_MUTEX(reading_mutex);
static int data_avail;
static u8 *rng_buffer, *rng_fillbuf;
static unsigned short current_quality;
static unsigned short default_quality; /* = 0; default to "off" */
module_param(current_quality, ushort, 0644);
MODULE_PARM_DESC(current_quality,
		 "current hwrng entropy estimation per mill");
module_param(default_quality, ushort, 0644);
MODULE_PARM_DESC(default_quality,
		 "default entropy content of hwrng per mill");
static void drop_current_rng(void);
static int hwrng_init(struct hwrng *rng);
static void start_khwrngd(void);

static inline int rng_get_data(struct hwrng *rng, u8 *buffer, size_t size,
			       int wait);
static size_t rng_buffer_size(void)
	return SMP_CACHE_BYTES < 32 ? 32 : SMP_CACHE_BYTES;
static void add_early_randomness(struct hwrng *rng)
	unsigned char bytes[16];
	mutex_lock(&reading_mutex);
	bytes_read = rng_get_data(rng, bytes, sizeof(bytes), 1);
	mutex_unlock(&reading_mutex);
	add_device_randomness(bytes, bytes_read);
static inline void cleanup_rng(struct kref *kref)
	struct hwrng *rng = container_of(kref, struct hwrng, ref);
	complete(&rng->cleanup_done);
static int set_current_rng(struct hwrng *rng)
	BUG_ON(!mutex_is_locked(&rng_mutex));
	err = hwrng_init(rng);
static void drop_current_rng(void)
	BUG_ON(!mutex_is_locked(&rng_mutex));
	/* decrease last reference for triggering the cleanup */
	kref_put(&current_rng->ref, cleanup_rng);
/* Returns ERR_PTR(), NULL or refcounted hwrng */
static struct hwrng *get_current_rng(void)
	if (mutex_lock_interruptible(&rng_mutex))
		return ERR_PTR(-ERESTARTSYS);
	mutex_unlock(&rng_mutex);
static void put_rng(struct hwrng *rng)
	/*
	 * Hold rng_mutex here so we serialize in case they set_current_rng
	 * on rng again immediately.
	 */
	mutex_lock(&rng_mutex);
	kref_put(&rng->ref, cleanup_rng);
	mutex_unlock(&rng_mutex);
static int hwrng_init(struct hwrng *rng)
	if (kref_get_unless_zero(&rng->ref))
	ret = rng->init(rng);
	kref_init(&rng->ref);
	reinit_completion(&rng->cleanup_done);
	add_early_randomness(rng);
	current_quality = rng->quality ? : default_quality;
	if (current_quality > 1024)
		current_quality = 1024;
	if (current_quality == 0 && hwrng_fill)
		kthread_stop(hwrng_fill);
	if (current_quality > 0 && !hwrng_fill)
static int rng_dev_open(struct inode *inode, struct file *filp)
	/* enforce read-only access to this chrdev */
	if ((filp->f_mode & FMODE_READ) == 0)
	if (filp->f_mode & FMODE_WRITE)
static inline int rng_get_data(struct hwrng *rng, u8 *buffer, size_t size,
			       int wait)
	BUG_ON(!mutex_is_locked(&reading_mutex));
	return rng->read(rng, (void *)buffer, size, wait);
	if (rng->data_present)
		present = rng->data_present(rng, wait);
	return rng->data_read(rng, (u32 *)buffer);
static ssize_t rng_dev_read(struct file *filp, char __user *buf,
			    size_t size, loff_t *offp)
	rng = get_current_rng();
	mutex_lock(&reading_mutex);
	bytes_read = rng_get_data(rng, rng_buffer,
				  !(filp->f_flags & O_NONBLOCK));
	if (bytes_read < 0) {
		goto out_unlock_reading;
	data_avail = bytes_read;
	if (filp->f_flags & O_NONBLOCK) {
		goto out_unlock_reading;
	if (copy_to_user(buf + ret, rng_buffer + data_avail,
		goto out_unlock_reading;
	mutex_unlock(&reading_mutex);
	schedule_timeout_interruptible(1);
	if (signal_pending(current)) {
	mutex_unlock(&reading_mutex);
static const struct file_operations rng_chrdev_ops = {
	.owner		= THIS_MODULE,
	.open		= rng_dev_open,
	.read		= rng_dev_read,
	.llseek		= noop_llseek,
static const struct attribute_group *rng_dev_groups[];

static struct miscdevice rng_miscdev = {
	.minor		= RNG_MISCDEV_MINOR,
	.name		= RNG_MODULE_NAME,
	.fops		= &rng_chrdev_ops,
	.groups		= rng_dev_groups,
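/*
 * Illustrative usage (not part of this driver): userspace reads raw bytes
 * from the misc device registered above, conventionally exposed as
 * /dev/hwrng (see Documentation/hw_random.txt).  A minimal consumer, with
 * error handling trimmed and a made-up helper name, looks roughly like:
 *
 *	#include <fcntl.h>
 *	#include <unistd.h>
 *
 *	ssize_t grab_hwrng_bytes(unsigned char *out, size_t len)
 *	{
 *		int fd = open("/dev/hwrng", O_RDONLY);
 *		ssize_t n = -1;
 *
 *		if (fd >= 0) {
 *			n = read(fd, out, len);	// blocks unless O_NONBLOCK
 *			close(fd);
 *		}
 *		return n;
 *	}
 */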
static ssize_t hwrng_attr_current_store(struct device *dev,
					struct device_attribute *attr,
					const char *buf, size_t len)
	err = mutex_lock_interruptible(&rng_mutex);
	list_for_each_entry(rng, &rng_list, list) {
		if (strcmp(rng->name, buf) == 0) {
			if (rng != current_rng)
				err = set_current_rng(rng);
	mutex_unlock(&rng_mutex);
static ssize_t hwrng_attr_current_show(struct device *dev,
				       struct device_attribute *attr,
	rng = get_current_rng();
	ret = snprintf(buf, PAGE_SIZE, "%s\n", rng ? rng->name : "none");
static ssize_t hwrng_attr_available_show(struct device *dev,
					 struct device_attribute *attr,
	err = mutex_lock_interruptible(&rng_mutex);
	list_for_each_entry(rng, &rng_list, list) {
		strlcat(buf, rng->name, PAGE_SIZE);
		strlcat(buf, " ", PAGE_SIZE);
	strlcat(buf, "\n", PAGE_SIZE);
	mutex_unlock(&rng_mutex);
static DEVICE_ATTR(rng_current, S_IRUGO | S_IWUSR,
		   hwrng_attr_current_show,
		   hwrng_attr_current_store);
static DEVICE_ATTR(rng_available, S_IRUGO,
		   hwrng_attr_available_show,

static struct attribute *rng_dev_attrs[] = {
	&dev_attr_rng_current.attr,
	&dev_attr_rng_available.attr,

ATTRIBUTE_GROUPS(rng_dev);
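/*
 * Illustrative usage (not part of this driver): the attributes above show
 * up under the hw_random misc device in sysfs, typically reachable as
 * /sys/class/misc/hw_random/.  Reading rng_available lists every
 * registered backend, and writing one of those names (without a trailing
 * newline, since hwrng_attr_current_store() matches with strcmp()) selects
 * the active backend:
 *
 *	cat /sys/class/misc/hw_random/rng_available
 *	echo -n my_rng > /sys/class/misc/hw_random/rng_current
 *
 * "my_rng" here stands for whatever name rng_available reported.
 */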
static void __exit unregister_miscdev(void)
	misc_deregister(&rng_miscdev);

static int __init register_miscdev(void)
	return misc_register(&rng_miscdev);
static int hwrng_fillfn(void *unused)
	while (!kthread_should_stop()) {
		rng = get_current_rng();
		if (IS_ERR(rng) || !rng)
		mutex_lock(&reading_mutex);
		rc = rng_get_data(rng, rng_fillbuf,
				  rng_buffer_size(), 1);
		mutex_unlock(&reading_mutex);
		pr_warn("hwrng: no data available\n");
		msleep_interruptible(10000);
		/* Outside lock, sure, but y'know: randomness. */
		add_hwgenerator_randomness((void *)rng_fillbuf, rc,
					   rc * current_quality * 8 >> 10);
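		/*
		 * Worked example of the entropy credit above: with
		 * current_quality == 700 and rc == 32 bytes of fresh data,
		 * the pool is credited 32 * 700 * 8 >> 10 == 175 bits.
		 */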
static void start_khwrngd(void)
	hwrng_fill = kthread_run(hwrng_fillfn, NULL, "hwrng");
	if (IS_ERR(hwrng_fill)) {
		pr_err("hwrng_fill thread creation failed");
int hwrng_register(struct hwrng *rng)
	struct hwrng *old_rng, *tmp;
	if (rng->name == NULL ||
	    (rng->data_read == NULL && rng->read == NULL))
	mutex_lock(&rng_mutex);
	/* kmalloc makes this safe for virt_to_page() in virtio_rng.c */
	rng_buffer = kmalloc(rng_buffer_size(), GFP_KERNEL);
	rng_fillbuf = kmalloc(rng_buffer_size(), GFP_KERNEL);
	/* Must not register two RNGs with the same name. */
	list_for_each_entry(tmp, &rng_list, list) {
		if (strcmp(tmp->name, rng->name) == 0)
	init_completion(&rng->cleanup_done);
	complete(&rng->cleanup_done);
	old_rng = current_rng;
	err = set_current_rng(rng);
	list_add_tail(&rng->list, &rng_list);
	if (old_rng && !rng->init) {
		/*
		 * Use a new device's input to add some randomness to
		 * the system. If this rng device isn't going to be
		 * used right away, its init function hasn't been
		 * called yet; so only use the randomness from devices
		 * that don't need an init callback.
		 */
		add_early_randomness(rng);
	mutex_unlock(&rng_mutex);
EXPORT_SYMBOL_GPL(hwrng_register);
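/*
 * Illustrative example (not part of this file): a minimal backend driver
 * registering with the core above.  The "foo" names are hypothetical and
 * the data source is a placeholder; a real driver would read its hardware
 * instead.  The ->read() hook is the one rng_get_data() calls.
 */
static int foo_rng_read(struct hwrng *rng, void *buf, size_t max, bool wait)
{
	u8 *out = buf;
	size_t i;

	/* Placeholder pattern only; never use this as real entropy. */
	for (i = 0; i < max; i++)
		out[i] = i & 0xff;
	return max;
}

static struct hwrng foo_rng = {
	.name	 = "foo",
	.read	 = foo_rng_read,
	.quality = 700,	/* entropy estimate; hwrng_init() caps it at 1024 */
};

/* Typically called from the backend's probe path. */
static int __maybe_unused foo_rng_register(void)
{
	return hwrng_register(&foo_rng);
}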
void hwrng_unregister(struct hwrng *rng)
	mutex_lock(&rng_mutex);
	list_del(&rng->list);
	if (current_rng == rng) {
		if (!list_empty(&rng_list)) {
			tail = list_entry(rng_list.prev, struct hwrng, list);
			set_current_rng(tail);
	if (list_empty(&rng_list)) {
		mutex_unlock(&rng_mutex);
		kthread_stop(hwrng_fill);
	mutex_unlock(&rng_mutex);
	wait_for_completion(&rng->cleanup_done);
EXPORT_SYMBOL_GPL(hwrng_unregister);
static void devm_hwrng_release(struct device *dev, void *res)
	hwrng_unregister(*(struct hwrng **)res);

static int devm_hwrng_match(struct device *dev, void *res, void *data)
	struct hwrng **r = res;
	if (WARN_ON(!r || !*r))
int devm_hwrng_register(struct device *dev, struct hwrng *rng)
	ptr = devres_alloc(devm_hwrng_release, sizeof(*ptr), GFP_KERNEL);
	error = hwrng_register(rng);
	devres_add(dev, ptr);
EXPORT_SYMBOL_GPL(devm_hwrng_register);
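/*
 * Illustrative example (not part of this file): the device-managed variant
 * ties the registration's lifetime to a struct device, so no explicit
 * hwrng_unregister() call is needed in the error or remove paths.  The
 * "bar" names are hypothetical; the read hook reuses the foo_rng_read()
 * sketch above.
 */
static struct hwrng bar_rng = {
	.name = "bar",
	.read = foo_rng_read,
};

static int __maybe_unused bar_rng_probe(struct device *dev)
{
	/* Unregistered automatically when dev is released. */
	return devm_hwrng_register(dev, &bar_rng);
}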
void devm_hwrng_unregister(struct device *dev, struct hwrng *rng)
	devres_release(dev, devm_hwrng_release, devm_hwrng_match, rng);
EXPORT_SYMBOL_GPL(devm_hwrng_unregister);
static int __init hwrng_modinit(void)
	return register_miscdev();

static void __exit hwrng_modexit(void)
	mutex_lock(&rng_mutex);
	mutex_unlock(&rng_mutex);
	unregister_miscdev();
module_init(hwrng_modinit);
module_exit(hwrng_modexit);

MODULE_DESCRIPTION("H/W Random Number Generator (RNG) driver");
MODULE_LICENSE("GPL");