/*
 *	linux/kernel/resource.c
 *
 * Copyright (C) 1999	Linus Torvalds
 * Copyright (C) 1999	Martin Mares <[email protected]>
 *
 * Arbitrary resource management.
 */

#include <linux/config.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/errno.h>
#include <linux/ioport.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/fs.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <asm/io.h>


struct resource ioport_resource = {
	.name	= "PCI IO",
	.start	= 0x0000,
	.end	= IO_SPACE_LIMIT,
	.flags	= IORESOURCE_IO,
};

EXPORT_SYMBOL(ioport_resource);

struct resource iomem_resource = {
	.name	= "PCI mem",
	.start	= 0UL,
	.end	= ~0UL,
	.flags	= IORESOURCE_MEM,
};

EXPORT_SYMBOL(iomem_resource);

static DEFINE_RWLOCK(resource_lock);

#ifdef CONFIG_PROC_FS

enum { MAX_IORES_LEVEL = 5 };

static void *r_next(struct seq_file *m, void *v, loff_t *pos)
{
	struct resource *p = v;
	(*pos)++;
	if (p->child)
		return p->child;
	while (!p->sibling && p->parent)
		p = p->parent;
	return p->sibling;
}

static void *r_start(struct seq_file *m, loff_t *pos)
	__acquires(resource_lock)
{
	struct resource *p = m->private;
	loff_t l = 0;
	read_lock(&resource_lock);
	for (p = p->child; p && l < *pos; p = r_next(m, p, &l))
		;
	return p;
}

static void r_stop(struct seq_file *m, void *v)
	__releases(resource_lock)
{
	read_unlock(&resource_lock);
}

static int r_show(struct seq_file *m, void *v)
{
	struct resource *root = m->private;
	struct resource *r = v, *p;
	int width = root->end < 0x10000 ? 4 : 8;
	int depth;

	for (depth = 0, p = r; depth < MAX_IORES_LEVEL; depth++, p = p->parent)
		if (p->parent == root)
			break;
	seq_printf(m, "%*s%0*lx-%0*lx : %s\n",
			depth * 2, "",
			width, r->start,
			width, r->end,
			r->name ? r->name : "<BAD>");
	return 0;
}
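
/*
 * For illustration, the output r_show() produces for /proc/ioports looks
 * roughly like the lines below (entries, names and nesting vary by machine;
 * these are made up).  Child resources are indented two spaces per level.
 *
 *	0170-0177 : ide1
 *	01f0-01f7 : ide0
 *	0cf8-0cff : PCI conf1
 *	e800-e8ff : 0000:00:1f.1
 *	  e800-e807 : ide0
 */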

static struct seq_operations resource_op = {
	.start	= r_start,
	.next	= r_next,
	.stop	= r_stop,
	.show	= r_show,
};

static int ioports_open(struct inode *inode, struct file *file)
{
	int res = seq_open(file, &resource_op);
	if (!res) {
		struct seq_file *m = file->private_data;
		m->private = &ioport_resource;
	}
	return res;
}

static int iomem_open(struct inode *inode, struct file *file)
{
	int res = seq_open(file, &resource_op);
	if (!res) {
		struct seq_file *m = file->private_data;
		m->private = &iomem_resource;
	}
	return res;
}

static struct file_operations proc_ioports_operations = {
	.open		= ioports_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= seq_release,
};

static struct file_operations proc_iomem_operations = {
	.open		= iomem_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= seq_release,
};

static int __init ioresources_init(void)
{
	struct proc_dir_entry *entry;

	entry = create_proc_entry("ioports", 0, NULL);
	if (entry)
		entry->proc_fops = &proc_ioports_operations;
	entry = create_proc_entry("iomem", 0, NULL);
	if (entry)
		entry->proc_fops = &proc_iomem_operations;
	return 0;
}
__initcall(ioresources_init);

#endif /* CONFIG_PROC_FS */

/* Return the conflict entry if you can't request it */
static struct resource * __request_resource(struct resource *root, struct resource *new)
{
	unsigned long start = new->start;
	unsigned long end = new->end;
	struct resource *tmp, **p;

	if (end < start)
		return root;
	if (start < root->start)
		return root;
	if (end > root->end)
		return root;
	p = &root->child;
	for (;;) {
		tmp = *p;
		if (!tmp || tmp->start > end) {
			new->sibling = tmp;
			*p = new;
			new->parent = root;
			return NULL;
		}
		p = &tmp->sibling;
		if (tmp->end < start)
			continue;
		return tmp;
	}
}

static int __release_resource(struct resource *old)
{
	struct resource *tmp, **p;

	p = &old->parent->child;
	for (;;) {
		tmp = *p;
		if (!tmp)
			break;
		if (tmp == old) {
			*p = tmp->sibling;
			old->parent = NULL;
			return 0;
		}
		p = &tmp->sibling;
	}
	return -EINVAL;
}

int request_resource(struct resource *root, struct resource *new)
{
	struct resource *conflict;

	write_lock(&resource_lock);
	conflict = __request_resource(root, new);
	write_unlock(&resource_lock);
	return conflict ? -EBUSY : 0;
}

EXPORT_SYMBOL(request_resource);
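
/*
 * Illustrative sketch (hypothetical driver; the name and addresses below are
 * made up): a driver with a fixed memory-mapped register window claims it
 * from the root memory tree, and gets -EBUSY back if an existing child
 * already overlaps the range.
 */
static struct resource example_regs = {
	.name	= "example-device",
	.start	= 0xfebf0000,
	.end	= 0xfebf0fff,
	.flags	= IORESOURCE_MEM,
};

static int __init example_claim(void)
{
	return request_resource(&iomem_resource, &example_regs);
}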

struct resource *____request_resource(struct resource *root, struct resource *new)
{
	struct resource *conflict;

	write_lock(&resource_lock);
	conflict = __request_resource(root, new);
	write_unlock(&resource_lock);
	return conflict;
}

EXPORT_SYMBOL(____request_resource);

int release_resource(struct resource *old)
{
	int retval;

	write_lock(&resource_lock);
	retval = __release_resource(old);
	write_unlock(&resource_lock);
	return retval;
}

EXPORT_SYMBOL(release_resource);

/*
 * Find empty slot in the resource tree given range and alignment.
 */
static int find_resource(struct resource *root, struct resource *new,
			 unsigned long size,
			 unsigned long min, unsigned long max,
			 unsigned long align,
			 void (*alignf)(void *, struct resource *,
					unsigned long, unsigned long),
			 void *alignf_data)
{
	struct resource *this = root->child;

	new->start = root->start;
	/*
	 * Skip past an allocated resource that starts at 0, since the assignment
	 * of this->start - 1 to new->end below would cause an underflow.
	 */
	if (this && this->start == 0) {
		new->start = this->end + 1;
		this = this->sibling;
	}
	for (;;) {
		if (this)
			new->end = this->start - 1;
		else
			new->end = root->end;
		if (new->start < min)
			new->start = min;
		if (new->end > max)
			new->end = max;
		new->start = ALIGN(new->start, align);
		if (alignf)
			alignf(alignf_data, new, size, align);
		if (new->start < new->end && new->end - new->start >= size - 1) {
			new->end = new->start + size - 1;
			return 0;
		}
		if (!this)
			break;
		new->start = this->end + 1;
		this = this->sibling;
	}
	return -EBUSY;
}

/*
 * Allocate empty slot in the resource tree given range and alignment.
 */
int allocate_resource(struct resource *root, struct resource *new,
		      unsigned long size,
		      unsigned long min, unsigned long max,
		      unsigned long align,
		      void (*alignf)(void *, struct resource *,
				     unsigned long, unsigned long),
		      void *alignf_data)
{
	int err;

	write_lock(&resource_lock);
	err = find_resource(root, new, size, min, max, align, alignf, alignf_data);
	if (err >= 0 && __request_resource(root, new))
		err = -EBUSY;
	write_unlock(&resource_lock);
	return err;
}

EXPORT_SYMBOL(allocate_resource);
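
/*
 * Illustrative sketch (hypothetical resource and bounds): asking for any
 * free, 4K-aligned, 4K-sized slot of I/O memory somewhere in the low 16MB.
 * On success the chosen range is written into example_buf and claimed.
 */
static struct resource example_buf = {
	.name	= "example-buffer",
	.flags	= IORESOURCE_MEM,
};

static int __init example_alloc(void)
{
	return allocate_resource(&iomem_resource, &example_buf,
				 0x1000,			/* size */
				 0x00000000, 0x00ffffff,	/* min, max */
				 0x1000,			/* align */
				 NULL, NULL);			/* no alignf callback */
}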

/**
 * insert_resource - Inserts a resource in the resource tree
 * @parent: parent of the new resource
 * @new: new resource to insert
 *
 * Returns 0 on success, -EBUSY if the resource can't be inserted.
 *
 * This function is equivalent to request_resource when no conflict
 * happens. If a conflict happens, and the conflicting resources
 * entirely fit within the range of the new resource, then the new
 * resource is inserted and the conflicting resources become children of
 * the new resource.  Otherwise the new resource becomes the child of
 * the conflicting resource.
 */
int insert_resource(struct resource *parent, struct resource *new)
{
	int result;
	struct resource *first, *next;

	write_lock(&resource_lock);
begin:
	result = 0;
	first = __request_resource(parent, new);
	if (!first)
		goto out;

	result = -EBUSY;
	if (first == parent)
		goto out;

	/* Resource fully contained by the clashing resource? Recurse into it */
	if (first->start <= new->start && first->end >= new->end) {
		parent = first;
		goto begin;
	}

	for (next = first; ; next = next->sibling) {
		/* Partial overlap? Bad, and unfixable */
		if (next->start < new->start || next->end > new->end)
			goto out;
		if (!next->sibling)
			break;
		if (next->sibling->start > new->end)
			break;
	}

	result = 0;

	new->parent = parent;
	new->sibling = next->sibling;
	new->child = first;

	next->sibling = NULL;
	for (next = first; next; next = next->sibling)
		next->parent = new;

	if (parent->child == first) {
		parent->child = new;
	} else {
		next = parent->child;
		while (next->sibling != first)
			next = next->sibling;
		next->sibling = new;
	}

out:
	write_unlock(&resource_lock);
	return result;
}

EXPORT_SYMBOL(insert_resource);
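
/*
 * Illustrative sketch (hypothetical bridge window): a firmware/bus layer can
 * insert a window that may already contain previously requested children;
 * per the comment above, those children are re-parented under the window.
 */
static struct resource example_window = {
	.name	= "example-bridge-window",
	.start	= 0xfeb00000,
	.end	= 0xfebfffff,
	.flags	= IORESOURCE_MEM,
};

static int __init example_insert(void)
{
	return insert_resource(&iomem_resource, &example_window);	/* 0 or -EBUSY */
}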

/*
 * Given an existing resource, change its start and size to match the
 * arguments.  Returns -EBUSY if it can't fit.  Existing children of
 * the resource are assumed to be immutable.
 */
int adjust_resource(struct resource *res, unsigned long start, unsigned long size)
{
	struct resource *tmp, *parent = res->parent;
	unsigned long end = start + size - 1;
	int result = -EBUSY;

	write_lock(&resource_lock);

	if ((start < parent->start) || (end > parent->end))
		goto out;

	for (tmp = res->child; tmp; tmp = tmp->sibling) {
		if ((tmp->start < start) || (tmp->end > end))
			goto out;
	}

	if (res->sibling && (res->sibling->start <= end))
		goto out;

	tmp = parent->child;
	if (tmp != res) {
		while (tmp->sibling != res)
			tmp = tmp->sibling;
		if (start <= tmp->end)
			goto out;
	}

	res->start = start;
	res->end = end;
	result = 0;

out:
	write_unlock(&resource_lock);
	return result;
}

EXPORT_SYMBOL(adjust_resource);
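
/*
 * Illustrative sketch: doubling the size of an already claimed region in
 * place.  This fails with -EBUSY if the new range would leave the parent,
 * run into the next sibling, or stop covering an existing child.
 */
static int example_grow(struct resource *res)
{
	unsigned long size = res->end - res->start + 1;

	return adjust_resource(res, res->start, size * 2);
}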

/*
 * This is compatibility stuff for IO resources.
 *
 * Note how this, unlike the above, knows about
 * the IO flag meanings (busy etc).
 *
 * Request-region creates a new busy region.
 *
 * Check-region returns non-zero if the area is already busy
 *
 * Release-region releases a matching busy region.
 */
struct resource * __request_region(struct resource *parent, unsigned long start, unsigned long n, const char *name)
{
	struct resource *res = kzalloc(sizeof(*res), GFP_KERNEL);

	if (res) {
		res->name = name;
		res->start = start;
		res->end = start + n - 1;
		res->flags = IORESOURCE_BUSY;

		write_lock(&resource_lock);

		for (;;) {
			struct resource *conflict;

			conflict = __request_resource(parent, res);
			if (!conflict)
				break;
			if (conflict != parent) {
				parent = conflict;
				if (!(conflict->flags & IORESOURCE_BUSY))
					continue;
			}

			/* Uhhuh, that didn't work out.. */
			kfree(res);
			res = NULL;
			break;
		}
		write_unlock(&resource_lock);
	}
	return res;
}

EXPORT_SYMBOL(__request_region);

int __check_region(struct resource *parent, unsigned long start, unsigned long n)
{
	struct resource * res;

	res = __request_region(parent, start, n, "check-region");
	if (!res)
		return -EBUSY;

	release_resource(res);
	kfree(res);
	return 0;
}

EXPORT_SYMBOL(__check_region);

void __release_region(struct resource *parent, unsigned long start, unsigned long n)
{
	struct resource **p;
	unsigned long end;

	p = &parent->child;
	end = start + n - 1;

	write_lock(&resource_lock);

	for (;;) {
		struct resource *res = *p;

		if (!res)
			break;
		if (res->start <= start && res->end >= end) {
			if (!(res->flags & IORESOURCE_BUSY)) {
				p = &res->child;
				continue;
			}
			if (res->start != start || res->end != end)
				break;
			*p = res->sibling;
			write_unlock(&resource_lock);
			kfree(res);
			return;
		}
		p = &res->sibling;
	}

	write_unlock(&resource_lock);

	printk(KERN_WARNING "Trying to free nonexistent resource <%08lx-%08lx>\n", start, end);
}

EXPORT_SYMBOL(__release_region);
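
/*
 * Illustrative sketch of the usual driver pattern built on the helpers above.
 * request_region()/release_region() are the ioport_resource wrappers from
 * <linux/ioport.h>; the port base, size and name here are hypothetical.
 */
static int __init example_probe(void)
{
	if (!request_region(0x300, 8, "example-card"))
		return -EBUSY;		/* someone else already owns the ports */

	/* ... talk to the hardware at 0x300-0x307 ... */

	release_region(0x300, 8);
	return 0;
}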

/*
 * Called from init/main.c to reserve IO ports.
 */
#define MAXRESERVE 4
static int __init reserve_setup(char *str)
{
	static int reserved;
	static struct resource reserve[MAXRESERVE];

	for (;;) {
		int io_start, io_num;
		int x = reserved;

		if (get_option (&str, &io_start) != 2)
			break;
		if (get_option (&str, &io_num) == 0)
			break;
		if (x < MAXRESERVE) {
			struct resource *res = reserve + x;
			res->name = "reserved";
			res->start = io_start;
			res->end = io_start + io_num - 1;
			res->flags = IORESOURCE_BUSY;
			res->child = NULL;
			if (request_resource(res->start >= 0x10000 ? &iomem_resource : &ioport_resource, res) == 0)
				reserved = x+1;
		}
	}
	return 1;
}

__setup("reserve=", reserve_setup);
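
/*
 * For illustration: booting with "reserve=0x300,8" keeps ports 0x300-0x307
 * marked busy so probing drivers leave them alone; a start value of 0x10000
 * or above is reserved from iomem_resource instead (see the test above).
 * Only the first MAXRESERVE start,length pairs are honoured.
 */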