/*
	Added support for the AMD Geode LX RNG
	(c) Copyright 2004-2005 Advanced Micro Devices, Inc.

	Hardware driver for the Intel/AMD/VIA Random Number Generators (RNG)

	Hardware driver for the AMD 768 Random Number Generator (RNG)

	Hardware driver for Intel i810 Random Number Generator (RNG)

	Copyright 2005 (c) MontaVista Software, Inc.

	Please read Documentation/hw_random.txt for details on use.

	----------------------------------------------------------
	This software may be used and distributed according to the terms
	of the GNU General Public License, incorporated herein by reference.
 */
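/*
 * Typical driver-side usage (illustrative sketch only -- the my_rng_*
 * names below are hypothetical; struct hwrng, hwrng_register() and
 * hwrng_unregister() are the interfaces provided by this file):
 *
 *	static int my_rng_read(struct hwrng *rng, void *buf, size_t max, bool wait)
 *	{
 *		fill buf with up to max bytes of hardware randomness,
 *		optionally blocking when wait is true, and return the
 *		number of bytes written (or a negative error code)
 *	}
 *
 *	static struct hwrng my_rng = {
 *		.name    = "my-rng",
 *		.read    = my_rng_read,
 *		.quality = 700,	(estimated entropy, same scale as default_quality)
 *	};
 *
 *	err = hwrng_register(&my_rng);
 *	...
 *	hwrng_unregister(&my_rng);
 */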
#include <linux/device.h>
#include <linux/hw_random.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/miscdevice.h>
#include <linux/kthread.h>
#include <linux/delay.h>
#include <linux/slab.h>
#include <linux/random.h>
#include <linux/err.h>
#include <asm/uaccess.h>

#define RNG_MODULE_NAME		"hw_random"
#define PFX			RNG_MODULE_NAME ": "
#define RNG_MISCDEV_MINOR	183 /* official */

static struct hwrng *current_rng;
static struct task_struct *hwrng_fill;
static LIST_HEAD(rng_list);
/* Protects rng_list and current_rng */
static DEFINE_MUTEX(rng_mutex);
/* Protects rng read functions, data_avail, rng_buffer and rng_fillbuf */
static DEFINE_MUTEX(reading_mutex);
static int data_avail;
static u8 *rng_buffer, *rng_fillbuf;
static unsigned short current_quality;
static unsigned short default_quality; /* = 0; default to "off" */

module_param(current_quality, ushort, 0644);
MODULE_PARM_DESC(current_quality,
		 "current hwrng entropy estimation per mill");
module_param(default_quality, ushort, 0644);
MODULE_PARM_DESC(default_quality,
		 "default entropy content of hwrng per mill");
static void drop_current_rng(void);
static int hwrng_init(struct hwrng *rng);
static void start_khwrngd(void);

static inline int rng_get_data(struct hwrng *rng, u8 *buffer, size_t size,
			       int wait);

static size_t rng_buffer_size(void)
{
	return SMP_CACHE_BYTES < 32 ? 32 : SMP_CACHE_BYTES;
}
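/*
 * The shared buffers below are max(32, SMP_CACHE_BYTES) bytes: at least
 * the 32 bytes a driver's ->read() hook is typically offered, and at
 * least one cache line.
 */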
static void add_early_randomness(struct hwrng *rng)
{
	unsigned char bytes[16];
	int bytes_read;

	mutex_lock(&reading_mutex);
	bytes_read = rng_get_data(rng, bytes, sizeof(bytes), 1);
	mutex_unlock(&reading_mutex);
	if (bytes_read > 0)
		add_device_randomness(bytes, bytes_read);
}
static inline void cleanup_rng(struct kref *kref)
{
	struct hwrng *rng = container_of(kref, struct hwrng, ref);

	complete(&rng->cleanup_done);

static int set_current_rng(struct hwrng *rng)
{
	BUG_ON(!mutex_is_locked(&rng_mutex));

	err = hwrng_init(rng);

static void drop_current_rng(void)
{
	BUG_ON(!mutex_is_locked(&rng_mutex));

	/* decrease last reference for triggering the cleanup */
	kref_put(&current_rng->ref, cleanup_rng);
/* Returns ERR_PTR(), NULL or refcounted hwrng */
static struct hwrng *get_current_rng(void)
{
	if (mutex_lock_interruptible(&rng_mutex))
		return ERR_PTR(-ERESTARTSYS);

	mutex_unlock(&rng_mutex);

static void put_rng(struct hwrng *rng)
{
	/*
	 * Hold rng_mutex here so we serialize in case they set_current_rng
	 * on rng again immediately.
	 */
	mutex_lock(&rng_mutex);
	kref_put(&rng->ref, cleanup_rng);
	mutex_unlock(&rng_mutex);
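/*
 * Reference counting summary: current_rng pins one reference on the
 * active device; get_current_rng()/put_rng() take and drop an extra
 * reference around each use. When the final reference is dropped,
 * cleanup_rng() completes cleanup_done, which hwrng_unregister() waits
 * on before returning.
 */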
static int hwrng_init(struct hwrng *rng)
{
	if (kref_get_unless_zero(&rng->ref))

	ret = rng->init(rng);

	kref_init(&rng->ref);
	reinit_completion(&rng->cleanup_done);

	add_early_randomness(rng);
	current_quality = rng->quality ? : default_quality;
	if (current_quality > 1024)
		current_quality = 1024;

	if (current_quality == 0 && hwrng_fill)
		kthread_stop(hwrng_fill);
	if (current_quality > 0 && !hwrng_fill)
		start_khwrngd();
static int rng_dev_open(struct inode *inode, struct file *filp)
{
	/* enforce read-only access to this chrdev */
	if ((filp->f_mode & FMODE_READ) == 0)
		return -EINVAL;
	if (filp->f_mode & FMODE_WRITE)
		return -EINVAL;
	return 0;
}

static inline int rng_get_data(struct hwrng *rng, u8 *buffer, size_t size,
			       int wait)
{
	BUG_ON(!mutex_is_locked(&reading_mutex));
	return rng->read(rng, (void *)buffer, size, wait);

	if (rng->data_present)
		present = rng->data_present(rng, wait);

	return rng->data_read(rng, (u32 *)buffer);
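/*
 * rng_get_data() above prefers a driver's newer ->read() hook and only
 * falls back to the legacy ->data_present()/->data_read() pair (at most
 * 4 bytes per call) when ->read() is not provided. It feeds both the
 * /dev/hwrng read path below and the khwrngd fill thread.
 */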
static ssize_t rng_dev_read(struct file *filp, char __user *buf,
			    size_t size, loff_t *offp)
{
	rng = get_current_rng();
	if (mutex_lock_interruptible(&reading_mutex)) {
	bytes_read = rng_get_data(rng, rng_buffer,
				  !(filp->f_flags & O_NONBLOCK));
	if (bytes_read < 0) {
		goto out_unlock_reading;
	data_avail = bytes_read;
	if (filp->f_flags & O_NONBLOCK) {
		goto out_unlock_reading;
	if (copy_to_user(buf + ret, rng_buffer + data_avail,
		goto out_unlock_reading;
	mutex_unlock(&reading_mutex);
	schedule_timeout_interruptible(1);
	if (signal_pending(current)) {
	mutex_unlock(&reading_mutex);
static const struct file_operations rng_chrdev_ops = {
	.owner		= THIS_MODULE,
	.open		= rng_dev_open,
	.read		= rng_dev_read,
	.llseek		= noop_llseek,
};
static const struct attribute_group *rng_dev_groups[];

static struct miscdevice rng_miscdev = {
	.minor		= RNG_MISCDEV_MINOR,
	.name		= RNG_MODULE_NAME,
	.fops		= &rng_chrdev_ops,
	.groups		= rng_dev_groups,
};
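/*
 * Registering this miscdevice is what creates the /dev/hwrng character
 * device (official misc minor 183). A quick smoke test from userspace
 * might look like:
 *
 *	dd if=/dev/hwrng bs=32 count=1 | xxd
 *
 * although in practice a daemon such as rngd is the usual consumer.
 */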
static ssize_t hwrng_attr_current_store(struct device *dev,
					struct device_attribute *attr,
					const char *buf, size_t len)
{
	err = mutex_lock_interruptible(&rng_mutex);
	list_for_each_entry(rng, &rng_list, list) {
		if (sysfs_streq(rng->name, buf)) {
			if (rng != current_rng)
				err = set_current_rng(rng);
	mutex_unlock(&rng_mutex);

static ssize_t hwrng_attr_current_show(struct device *dev,
				       struct device_attribute *attr,
				       char *buf)
{
	rng = get_current_rng();
	ret = snprintf(buf, PAGE_SIZE, "%s\n", rng ? rng->name : "none");

static ssize_t hwrng_attr_available_show(struct device *dev,
					 struct device_attribute *attr,
					 char *buf)
{
	err = mutex_lock_interruptible(&rng_mutex);
	list_for_each_entry(rng, &rng_list, list) {
		strlcat(buf, rng->name, PAGE_SIZE);
		strlcat(buf, " ", PAGE_SIZE);
	}
	strlcat(buf, "\n", PAGE_SIZE);
	mutex_unlock(&rng_mutex);
static DEVICE_ATTR(rng_current, S_IRUGO | S_IWUSR,
		   hwrng_attr_current_show,
		   hwrng_attr_current_store);
static DEVICE_ATTR(rng_available, S_IRUGO,
		   hwrng_attr_available_show,
		   NULL);
static struct attribute *rng_dev_attrs[] = {
	&dev_attr_rng_current.attr,
	&dev_attr_rng_available.attr,
	NULL
};

ATTRIBUTE_GROUPS(rng_dev);
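/*
 * These attributes show up under the misc device, typically as
 * /sys/class/misc/hw_random/rng_available and rng_current, so the active
 * backend can be inspected and switched at runtime, e.g.:
 *
 *	cat /sys/class/misc/hw_random/rng_available
 *	echo some-rng-name > /sys/class/misc/hw_random/rng_current
 *
 * (where some-rng-name is one of the names listed by rng_available).
 */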
static void __exit unregister_miscdev(void)
{
	misc_deregister(&rng_miscdev);
}

static int __init register_miscdev(void)
{
	return misc_register(&rng_miscdev);
}

static int hwrng_fillfn(void *unused)
{
	while (!kthread_should_stop()) {
		rng = get_current_rng();
		if (IS_ERR(rng) || !rng)
		mutex_lock(&reading_mutex);
		rc = rng_get_data(rng, rng_fillbuf,
				  rng_buffer_size(), 1);
		mutex_unlock(&reading_mutex);
			pr_warn("hwrng: no data available\n");
			msleep_interruptible(10000);
		/* Outside lock, sure, but y'know: randomness. */
		add_hwgenerator_randomness((void *)rng_fillbuf, rc,
					   rc * current_quality * 8 >> 10);
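/*
 * Entropy credited per refill above: rc bytes * 8 bits, scaled by
 * current_quality / 1024. For example, rc == 64 and current_quality ==
 * 700 credits 64 * 700 * 8 >> 10 = 350 bits of entropy for 512 bits of
 * data handed to add_hwgenerator_randomness().
 */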
static void start_khwrngd(void)
{
	hwrng_fill = kthread_run(hwrng_fillfn, NULL, "hwrng");
	if (IS_ERR(hwrng_fill)) {
		pr_err("hwrng_fill thread creation failed");
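/*
 * hwrng_register() adds a device to rng_list and, if no RNG is current
 * yet, makes it the active one. The device must provide a name and
 * either the newer ->read() hook or the legacy ->data_read() hook, and
 * names must be unique among registered RNGs.
 */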
int hwrng_register(struct hwrng *rng)
{
	struct hwrng *old_rng, *tmp;

	if (rng->name == NULL ||
	    (rng->data_read == NULL && rng->read == NULL))

	mutex_lock(&rng_mutex);

	/* kmalloc makes this safe for virt_to_page() in virtio_rng.c */
	rng_buffer = kmalloc(rng_buffer_size(), GFP_KERNEL);
	rng_fillbuf = kmalloc(rng_buffer_size(), GFP_KERNEL);

	/* Must not register two RNGs with the same name. */
	list_for_each_entry(tmp, &rng_list, list) {
		if (strcmp(tmp->name, rng->name) == 0)

	init_completion(&rng->cleanup_done);
	complete(&rng->cleanup_done);

	old_rng = current_rng;
	err = set_current_rng(rng);
	list_add_tail(&rng->list, &rng_list);

	if (old_rng && !rng->init) {
		/*
		 * Use a new device's input to add some randomness to
		 * the system. If this rng device isn't going to be
		 * used right away, its init function hasn't been
		 * called yet; so only use the randomness from devices
		 * that don't need an init callback.
		 */
		add_early_randomness(rng);
	}

	mutex_unlock(&rng_mutex);
EXPORT_SYMBOL_GPL(hwrng_register);

void hwrng_unregister(struct hwrng *rng)
{
	mutex_lock(&rng_mutex);

	list_del(&rng->list);
	if (current_rng == rng) {
		if (!list_empty(&rng_list)) {
			tail = list_entry(rng_list.prev, struct hwrng, list);
			set_current_rng(tail);

	if (list_empty(&rng_list)) {
		mutex_unlock(&rng_mutex);
		kthread_stop(hwrng_fill);
	mutex_unlock(&rng_mutex);

	wait_for_completion(&rng->cleanup_done);
EXPORT_SYMBOL_GPL(hwrng_unregister);
static void devm_hwrng_release(struct device *dev, void *res)
{
	hwrng_unregister(*(struct hwrng **)res);
}
static int devm_hwrng_match(struct device *dev, void *res, void *data)
{
	struct hwrng **r = res;

	if (WARN_ON(!r || !*r))

int devm_hwrng_register(struct device *dev, struct hwrng *rng)
{
	ptr = devres_alloc(devm_hwrng_release, sizeof(*ptr), GFP_KERNEL);

	error = hwrng_register(rng);

	devres_add(dev, ptr);
EXPORT_SYMBOL_GPL(devm_hwrng_register);
void devm_hwrng_unregister(struct device *dev, struct hwrng *rng)
{
	devres_release(dev, devm_hwrng_release, devm_hwrng_match, rng);
}
EXPORT_SYMBOL_GPL(devm_hwrng_unregister);
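/*
 * Illustrative probe-time use of the devres variant (sketch only; the
 * my_* names are hypothetical):
 *
 *	static int my_platform_probe(struct platform_device *pdev)
 *	{
 *		...
 *		return devm_hwrng_register(&pdev->dev, &my_rng);
 *	}
 *
 * The registration is undone automatically when the device is unbound,
 * so no explicit hwrng_unregister() call is needed.
 */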
static int __init hwrng_modinit(void)
{
	return register_miscdev();
}
static void __exit hwrng_modexit(void)
{
	mutex_lock(&rng_mutex);
	mutex_unlock(&rng_mutex);

	unregister_miscdev();
}

module_init(hwrng_modinit);
module_exit(hwrng_modexit);

MODULE_DESCRIPTION("H/W Random Number Generator (RNG) driver");
MODULE_LICENSE("GPL");