/*
 * $Id: mtd_blkdevs.c,v 1.27 2005/11/07 11:14:20 gleixner Exp $
 *
 * (C) 2003 David Woodhouse <[email protected]>
 *
 * Interface to Linux 2.5 block layer for MTD 'translation layers'.
 *
 */
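
/*
 * A translation layer plugs into this core by filling in a
 * struct mtd_blktrans_ops and calling register_mtd_blktrans().
 * The sketch below is illustrative only: MYFLASH_MAJOR and the
 * myflash_* callbacks are hypothetical names; see mtdblock.c or
 * ftl.c for real users of this interface.
 *
 *	static struct mtd_blktrans_ops myflash_tr = {
 *		.name		= "myflash",
 *		.major		= MYFLASH_MAJOR,
 *		.part_bits	= 0,
 *		.blksize	= 512,
 *		.readsect	= myflash_readsect,
 *		.writesect	= myflash_writesect,
 *		.add_mtd	= myflash_add_mtd,
 *		.remove_dev	= myflash_remove_dev,
 *		.owner		= THIS_MODULE,
 *	};
 *
 *	static int __init myflash_init(void)
 *	{
 *		return register_mtd_blktrans(&myflash_tr);
 *	}
 */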

#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/list.h>
#include <linux/fs.h>
#include <linux/mtd/blktrans.h>
#include <linux/mtd/mtd.h>
#include <linux/blkdev.h>
#include <linux/blkpg.h>
#include <linux/freezer.h>
#include <linux/spinlock.h>
#include <linux/hdreg.h>
#include <linux/init.h>
#include <linux/mutex.h>
#include <linux/kthread.h>
#include <asm/uaccess.h>

#include "mtdcore.h"

static LIST_HEAD(blktrans_majors);

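/*
 * Per-translation-layer state: a single worker thread and a single
 * request queue are shared by every device registered under that
 * translation layer.
 */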
struct mtd_blkcore_priv {
	struct task_struct *thread;
	struct request_queue *rq;
	spinlock_t queue_lock;
};

static int do_blktrans_request(struct mtd_blktrans_ops *tr,
			       struct mtd_blktrans_dev *dev,
			       struct request *req)
{
	unsigned long block, nsect;
	char *buf;

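	/*
	 * The block layer counts in 512-byte sectors; the translation
	 * layer counts in blocks of tr->blksize bytes.  Convert by
	 * going through a byte count: with blksize 512 (blkshift 9)
	 * this is an identity mapping, with blksize 1024 (blkshift 10)
	 * two sectors make up one block.
	 */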
	block = req->sector << 9 >> tr->blkshift;
	nsect = req->current_nr_sectors << 9 >> tr->blkshift;

	buf = req->buffer;

	if (!blk_fs_request(req))
		return 0;

	if (req->sector + req->current_nr_sectors > get_capacity(req->rq_disk))
		return 0;

	switch(rq_data_dir(req)) {
	case READ:
		for (; nsect > 0; nsect--, block++, buf += tr->blksize)
			if (tr->readsect(dev, block, buf))
				return 0;
		return 1;

	case WRITE:
		if (!tr->writesect)
			return 0;

		for (; nsect > 0; nsect--, block++, buf += tr->blksize)
			if (tr->writesect(dev, block, buf))
				return 0;
		return 1;

	default:
		printk(KERN_NOTICE "Unknown request %u\n", rq_data_dir(req));
		return 0;
	}
}

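/*
 * Worker thread: pulls requests off the elevator queue and hands them
 * to the translation layer.  The queue lock is dropped around the
 * (potentially slow) flash I/O so new requests can still be queued
 * while one is being serviced.
 */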
static int mtd_blktrans_thread(void *arg)
{
	struct mtd_blktrans_ops *tr = arg;
	struct request_queue *rq = tr->blkcore_priv->rq;

	/* we might get involved when memory gets low, so use PF_MEMALLOC */
	current->flags |= PF_MEMALLOC;

	spin_lock_irq(rq->queue_lock);
	while (!kthread_should_stop()) {
		struct request *req;
		struct mtd_blktrans_dev *dev;
		int res = 0;

		req = elv_next_request(rq);

		if (!req) {
			set_current_state(TASK_INTERRUPTIBLE);
			spin_unlock_irq(rq->queue_lock);
			schedule();
			spin_lock_irq(rq->queue_lock);
			continue;
		}

		dev = req->rq_disk->private_data;
		tr = dev->tr;

		spin_unlock_irq(rq->queue_lock);

		mutex_lock(&dev->lock);
		res = do_blktrans_request(tr, dev, req);
		mutex_unlock(&dev->lock);

		spin_lock_irq(rq->queue_lock);

		end_request(req, res);
	}
	spin_unlock_irq(rq->queue_lock);

	return 0;
}

static void mtd_blktrans_request(struct request_queue *rq)
{
	struct mtd_blktrans_ops *tr = rq->queuedata;
	wake_up_process(tr->blkcore_priv->thread);
}


static int blktrans_open(struct inode *i, struct file *f)
{
	struct mtd_blktrans_dev *dev;
	struct mtd_blktrans_ops *tr;
	int ret = -ENODEV;

	dev = i->i_bdev->bd_disk->private_data;
	tr = dev->tr;

	if (!try_module_get(dev->mtd->owner))
		goto out;

	if (!try_module_get(tr->owner))
		goto out_tr;

	/* FIXME: Locking. A hot pluggable device can go away
	   (del_mtd_device can be called for it) without its module
	   being unloaded. */
	dev->mtd->usecount++;

	ret = 0;
	if (tr->open && (ret = tr->open(dev))) {
		dev->mtd->usecount--;
		module_put(dev->mtd->owner);
	out_tr:
		module_put(tr->owner);
	}
 out:
	return ret;
}

static int blktrans_release(struct inode *i, struct file *f)
{
	struct mtd_blktrans_dev *dev;
	struct mtd_blktrans_ops *tr;
	int ret = 0;

	dev = i->i_bdev->bd_disk->private_data;
	tr = dev->tr;

	if (tr->release)
		ret = tr->release(dev);

	if (!ret) {
		dev->mtd->usecount--;
		module_put(dev->mtd->owner);
		module_put(tr->owner);
	}

	return ret;
}

static int blktrans_getgeo(struct block_device *bdev, struct hd_geometry *geo)
{
	struct mtd_blktrans_dev *dev = bdev->bd_disk->private_data;

	if (dev->tr->getgeo)
		return dev->tr->getgeo(dev, geo);
	return -ENOTTY;
}

static int blktrans_ioctl(struct inode *inode, struct file *file,
			  unsigned int cmd, unsigned long arg)
{
	struct mtd_blktrans_dev *dev = inode->i_bdev->bd_disk->private_data;
	struct mtd_blktrans_ops *tr = dev->tr;

	switch (cmd) {
	case BLKFLSBUF:
		if (tr->flush)
			return tr->flush(dev);
		/* The core code did the work, we had nothing to do. */
		return 0;
	default:
		return -ENOTTY;
	}
}

static struct block_device_operations mtd_blktrans_ops = {
	.owner		= THIS_MODULE,
	.open		= blktrans_open,
	.release	= blktrans_release,
	.ioctl		= blktrans_ioctl,
	.getgeo		= blktrans_getgeo,
};

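/*
 * Add one block translation device.  The caller must already hold
 * mtd_table_mutex; the mutex_trylock()/BUG() below only asserts that.
 * If new->devnum is -1 the first free device number for this
 * translation layer is picked, otherwise the requested number is used
 * or -EBUSY is returned if it is already taken.
 */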
int add_mtd_blktrans_dev(struct mtd_blktrans_dev *new)
{
	struct mtd_blktrans_ops *tr = new->tr;
	struct list_head *this;
	int last_devnum = -1;
	struct gendisk *gd;

	if (mutex_trylock(&mtd_table_mutex)) {
		mutex_unlock(&mtd_table_mutex);
		BUG();
	}

	list_for_each(this, &tr->devs) {
		struct mtd_blktrans_dev *d = list_entry(this, struct mtd_blktrans_dev, list);
		if (new->devnum == -1) {
			/* Use first free number */
			if (d->devnum != last_devnum+1) {
				/* Found a free devnum. Plug it in here */
				new->devnum = last_devnum+1;
				list_add_tail(&new->list, &d->list);
				goto added;
			}
		} else if (d->devnum == new->devnum) {
			/* Required number taken */
			return -EBUSY;
		} else if (d->devnum > new->devnum) {
			/* Required number was free */
			list_add_tail(&new->list, &d->list);
			goto added;
		}
		last_devnum = d->devnum;
	}
	if (new->devnum == -1)
		new->devnum = last_devnum+1;

	if ((new->devnum << tr->part_bits) > 256) {
		return -EBUSY;
	}

	list_add_tail(&new->list, &tr->devs);
 added:
	mutex_init(&new->lock);
	if (!tr->writesect)
		new->readonly = 1;

	gd = alloc_disk(1 << tr->part_bits);
	if (!gd) {
		list_del(&new->list);
		return -ENOMEM;
	}
	gd->major = tr->major;
	gd->first_minor = (new->devnum) << tr->part_bits;
	gd->fops = &mtd_blktrans_ops;

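	/*
	 * Partitionable translation layers get letter-suffixed names in
	 * the style of IDE/SCSI disks, non-partitionable ones get a
	 * plain numeric suffix: e.g. "ftla", "ftlb", ... versus
	 * "mtdblock0", "mtdblock1", ...
	 */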
	if (tr->part_bits)
		if (new->devnum < 26)
			snprintf(gd->disk_name, sizeof(gd->disk_name),
				 "%s%c", tr->name, 'a' + new->devnum);
		else
			snprintf(gd->disk_name, sizeof(gd->disk_name),
				 "%s%c%c", tr->name,
				 'a' - 1 + new->devnum / 26,
				 'a' + new->devnum % 26);
	else
		snprintf(gd->disk_name, sizeof(gd->disk_name),
			 "%s%d", tr->name, new->devnum);

	/* 2.5 has capacity in units of 512 bytes while still
	   having BLOCK_SIZE_BITS set to 10. Just to keep us amused. */
	set_capacity(gd, (new->size * tr->blksize) >> 9);

	gd->private_data = new;
	new->blkcore_priv = gd;
	gd->queue = tr->blkcore_priv->rq;

	if (new->readonly)
		set_disk_ro(gd, 1);

	add_disk(gd);

	return 0;
}

int del_mtd_blktrans_dev(struct mtd_blktrans_dev *old)
{
	if (mutex_trylock(&mtd_table_mutex)) {
		mutex_unlock(&mtd_table_mutex);
		BUG();
	}

	list_del(&old->list);

	del_gendisk(old->blkcore_priv);
	put_disk(old->blkcore_priv);

	return 0;
}

static void blktrans_notify_remove(struct mtd_info *mtd)
{
	struct list_head *this, *this2, *next;

	list_for_each(this, &blktrans_majors) {
		struct mtd_blktrans_ops *tr = list_entry(this, struct mtd_blktrans_ops, list);

		list_for_each_safe(this2, next, &tr->devs) {
			struct mtd_blktrans_dev *dev = list_entry(this2, struct mtd_blktrans_dev, list);

			if (dev->mtd == mtd)
				tr->remove_dev(dev);
		}
	}
}

static void blktrans_notify_add(struct mtd_info *mtd)
{
	struct list_head *this;

	if (mtd->type == MTD_ABSENT)
		return;

	list_for_each(this, &blktrans_majors) {
		struct mtd_blktrans_ops *tr = list_entry(this, struct mtd_blktrans_ops, list);

		tr->add_mtd(tr, mtd);
	}

}

static struct mtd_notifier blktrans_notifier = {
	.add = blktrans_notify_add,
	.remove = blktrans_notify_remove,
};

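/*
 * Register a translation layer: grab its block major, set up the
 * shared request queue and worker thread, then attach it to every
 * MTD device already present in mtd_table.  Later MTD additions and
 * removals are handled through blktrans_notifier above.
 */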
int register_mtd_blktrans(struct mtd_blktrans_ops *tr)
{
	int ret, i;

	/* Register the notifier if/when the first device type is
	   registered, to prevent the link/init ordering from fucking
	   us over. */
	if (!blktrans_notifier.list.next)
		register_mtd_user(&blktrans_notifier);

	tr->blkcore_priv = kzalloc(sizeof(*tr->blkcore_priv), GFP_KERNEL);
	if (!tr->blkcore_priv)
		return -ENOMEM;

	mutex_lock(&mtd_table_mutex);

	ret = register_blkdev(tr->major, tr->name);
	if (ret) {
		printk(KERN_WARNING "Unable to register %s block device on major %d: %d\n",
		       tr->name, tr->major, ret);
		kfree(tr->blkcore_priv);
		mutex_unlock(&mtd_table_mutex);
		return ret;
	}
	spin_lock_init(&tr->blkcore_priv->queue_lock);

	tr->blkcore_priv->rq = blk_init_queue(mtd_blktrans_request, &tr->blkcore_priv->queue_lock);
	if (!tr->blkcore_priv->rq) {
		unregister_blkdev(tr->major, tr->name);
		kfree(tr->blkcore_priv);
		mutex_unlock(&mtd_table_mutex);
		return -ENOMEM;
	}

	tr->blkcore_priv->rq->queuedata = tr;
	blk_queue_hardsect_size(tr->blkcore_priv->rq, tr->blksize);
	tr->blkshift = ffs(tr->blksize) - 1;

	tr->blkcore_priv->thread = kthread_run(mtd_blktrans_thread, tr,
			"%sd", tr->name);
	if (IS_ERR(tr->blkcore_priv->thread)) {
		blk_cleanup_queue(tr->blkcore_priv->rq);
		unregister_blkdev(tr->major, tr->name);
		kfree(tr->blkcore_priv);
		mutex_unlock(&mtd_table_mutex);
		return PTR_ERR(tr->blkcore_priv->thread);
	}

	INIT_LIST_HEAD(&tr->devs);
	list_add(&tr->list, &blktrans_majors);

	for (i=0; i<MAX_MTD_DEVICES; i++) {
		if (mtd_table[i] && mtd_table[i]->type != MTD_ABSENT)
			tr->add_mtd(tr, mtd_table[i]);
	}

	mutex_unlock(&mtd_table_mutex);

	return 0;
}

int deregister_mtd_blktrans(struct mtd_blktrans_ops *tr)
{
	struct list_head *this, *next;

	mutex_lock(&mtd_table_mutex);

	/* Clean up the kernel thread */
	kthread_stop(tr->blkcore_priv->thread);

	/* Remove it from the list of active majors */
	list_del(&tr->list);

	list_for_each_safe(this, next, &tr->devs) {
		struct mtd_blktrans_dev *dev = list_entry(this, struct mtd_blktrans_dev, list);
		tr->remove_dev(dev);
	}

	blk_cleanup_queue(tr->blkcore_priv->rq);
	unregister_blkdev(tr->major, tr->name);

	mutex_unlock(&mtd_table_mutex);

	kfree(tr->blkcore_priv);

	BUG_ON(!list_empty(&tr->devs));
	return 0;
}

static void __exit mtd_blktrans_exit(void)
{
	/* No race here -- if someone's currently in register_mtd_blktrans
	   we're screwed anyway. */
	if (blktrans_notifier.list.next)
		unregister_mtd_user(&blktrans_notifier);
}

module_exit(mtd_blktrans_exit);

EXPORT_SYMBOL_GPL(register_mtd_blktrans);
EXPORT_SYMBOL_GPL(deregister_mtd_blktrans);
EXPORT_SYMBOL_GPL(add_mtd_blktrans_dev);
EXPORT_SYMBOL_GPL(del_mtd_blktrans_dev);

MODULE_AUTHOR("David Woodhouse <[email protected]>");
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Common interface to block layer for MTD 'translation layers'");