// SPDX-License-Identifier: GPL-2.0
/*
 * drivers/base/core.c - core driver model code (device registration, etc)
 *
 * Copyright (c) 2002-3 Patrick Mochel
 * Copyright (c) 2002-3 Open Source Development Labs
 * Copyright (c) 2006 Greg Kroah-Hartman <gregkh@suse.de>
 * Copyright (c) 2006 Novell, Inc.
 */

#include <linux/acpi.h>
#include <linux/cpufreq.h>
#include <linux/device.h>
#include <linux/err.h>
#include <linux/fwnode.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/kdev_t.h>
#include <linux/notifier.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/genhd.h>
#include <linux/mutex.h>
#include <linux/pm_runtime.h>
#include <linux/netdevice.h>
#include <linux/sched/signal.h>
#include <linux/sched/mm.h>
#include <linux/sysfs.h>

#include "base.h"
#include "power/power.h"

#ifdef CONFIG_SYSFS_DEPRECATED
#ifdef CONFIG_SYSFS_DEPRECATED_V2
long sysfs_deprecated = 1;
#else
long sysfs_deprecated = 0;
#endif
static int __init sysfs_deprecated_setup(char *arg)
{
	return kstrtol(arg, 10, &sysfs_deprecated);
}
early_param("sysfs.deprecated", sysfs_deprecated_setup);
#endif

/* Device links support. */
static LIST_HEAD(wait_for_suppliers);
static DEFINE_MUTEX(wfs_lock);
static LIST_HEAD(deferred_sync);
static unsigned int defer_sync_state_count = 1;
static DEFINE_MUTEX(fwnode_link_lock);

/**
 * fwnode_link_add - Create a link between two fwnode_handles.
 * @con: Consumer end of the link.
 * @sup: Supplier end of the link.
 *
 * Create a fwnode link between fwnode handles @con and @sup. The fwnode link
 * represents the detail that the firmware lists @sup fwnode as supplying a
 * resource to @con.
 *
 * The driver core will use the fwnode link to create a device link between the
 * two device objects corresponding to @con and @sup when they are created. The
 * driver core will automatically delete the fwnode link between @con and @sup
 * after doing that.
 *
 * Attempts to create duplicate links between the same pair of fwnode handles
 * are ignored and there is no reference counting.
 */
int fwnode_link_add(struct fwnode_handle *con, struct fwnode_handle *sup)
{
	struct fwnode_link *link;
	int ret = 0;

	mutex_lock(&fwnode_link_lock);

	list_for_each_entry(link, &sup->consumers, s_hook)
		if (link->consumer == con)
			goto out;

	link = kzalloc(sizeof(*link), GFP_KERNEL);
	if (!link) {
		ret = -ENOMEM;
		goto out;
	}

	link->supplier = sup;
	INIT_LIST_HEAD(&link->s_hook);
	link->consumer = con;
	INIT_LIST_HEAD(&link->c_hook);

	list_add(&link->s_hook, &sup->consumers);
	list_add(&link->c_hook, &con->suppliers);
out:
	mutex_unlock(&fwnode_link_lock);

	return ret;
}
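
/*
 * Illustrative sketch, not part of the original file: firmware parsing code
 * that discovers a supplier/consumer relationship could record it roughly as
 * below; both fwnode pointers here are hypothetical placeholders.
 *
 *	struct fwnode_handle *con_fwnode, *sup_fwnode;
 *
 *	if (fwnode_link_add(con_fwnode, sup_fwnode))
 *		pr_debug("failed to record fwnode dependency\n");
 */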

/**
 * fwnode_links_purge_suppliers - Delete all supplier links of fwnode_handle.
 * @fwnode: fwnode whose supplier links need to be deleted
 *
 * Deletes all supplier links connecting directly to @fwnode.
 */
static void fwnode_links_purge_suppliers(struct fwnode_handle *fwnode)
{
	struct fwnode_link *link, *tmp;

	mutex_lock(&fwnode_link_lock);
	list_for_each_entry_safe(link, tmp, &fwnode->suppliers, c_hook) {
		list_del(&link->s_hook);
		list_del(&link->c_hook);
		kfree(link);
	}
	mutex_unlock(&fwnode_link_lock);
}

/**
 * fwnode_links_purge_consumers - Delete all consumer links of fwnode_handle.
 * @fwnode: fwnode whose consumer links need to be deleted
 *
 * Deletes all consumer links connecting directly to @fwnode.
 */
static void fwnode_links_purge_consumers(struct fwnode_handle *fwnode)
{
	struct fwnode_link *link, *tmp;

	mutex_lock(&fwnode_link_lock);
	list_for_each_entry_safe(link, tmp, &fwnode->consumers, s_hook) {
		list_del(&link->s_hook);
		list_del(&link->c_hook);
		kfree(link);
	}
	mutex_unlock(&fwnode_link_lock);
}

/**
 * fwnode_links_purge - Delete all links connected to a fwnode_handle.
 * @fwnode: fwnode whose links need to be deleted
 *
 * Deletes all links connecting directly to a fwnode.
 */
void fwnode_links_purge(struct fwnode_handle *fwnode)
{
	fwnode_links_purge_suppliers(fwnode);
	fwnode_links_purge_consumers(fwnode);
}

#ifdef CONFIG_SRCU
static DEFINE_MUTEX(device_links_lock);
DEFINE_STATIC_SRCU(device_links_srcu);

static inline void device_links_write_lock(void)
{
	mutex_lock(&device_links_lock);
}

static inline void device_links_write_unlock(void)
{
	mutex_unlock(&device_links_lock);
}

int device_links_read_lock(void) __acquires(&device_links_srcu)
{
	return srcu_read_lock(&device_links_srcu);
}

void device_links_read_unlock(int idx) __releases(&device_links_srcu)
{
	srcu_read_unlock(&device_links_srcu, idx);
}

int device_links_read_lock_held(void)
{
	return srcu_read_lock_held(&device_links_srcu);
}
#else /* !CONFIG_SRCU */
static DECLARE_RWSEM(device_links_lock);

static inline void device_links_write_lock(void)
{
	down_write(&device_links_lock);
}

static inline void device_links_write_unlock(void)
{
	up_write(&device_links_lock);
}

int device_links_read_lock(void)
{
	down_read(&device_links_lock);
	return 0;
}

void device_links_read_unlock(int not_used)
{
	up_read(&device_links_lock);
}

#ifdef CONFIG_DEBUG_LOCK_ALLOC
int device_links_read_lock_held(void)
{
	return lockdep_is_held(&device_links_lock);
}
#endif
#endif /* !CONFIG_SRCU */

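/*
 * Illustrative sketch, not part of the original file: readers are expected to
 * bracket list walks with the read lock and hand the returned token back on
 * unlock, roughly like this (the loop body is hypothetical):
 *
 *	int idx = device_links_read_lock();
 *
 *	list_for_each_entry_rcu(link, &dev->links.suppliers, c_node,
 *				device_links_read_lock_held())
 *		inspect(link);
 *
 *	device_links_read_unlock(idx);
 */
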
/**
 * device_is_dependent - Check if one device depends on another one
 * @dev: Device to check dependencies for.
 * @target: Device to check against.
 *
 * Check if @target depends on @dev or any device dependent on it (its child or
 * its consumer etc). Return 1 if that is the case or 0 otherwise.
 */
int device_is_dependent(struct device *dev, void *target)
{
	struct device_link *link;
	int ret;

	if (dev == target)
		return 1;

	ret = device_for_each_child(dev, target, device_is_dependent);
	if (ret)
		return ret;

	list_for_each_entry(link, &dev->links.consumers, s_node) {
		if (link->flags == (DL_FLAG_SYNC_STATE_ONLY | DL_FLAG_MANAGED))
			continue;

		if (link->consumer == target)
			return 1;

		ret = device_is_dependent(link->consumer, target);
		if (ret)
			break;
	}
	return ret;
}

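/*
 * Illustrative sketch, not part of the original file: device_link_add() below
 * uses this helper to reject links that would create a dependency cycle,
 * conceptually:
 *
 *	if (device_is_dependent(consumer, supplier))
 *		return NULL;	// supplier already depends on the consumer
 */
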
static void device_link_init_status(struct device_link *link,
				    struct device *consumer,
				    struct device *supplier)
{
	switch (supplier->links.status) {
	case DL_DEV_PROBING:
		switch (consumer->links.status) {
		case DL_DEV_PROBING:
			/*
			 * A consumer driver can create a link to a supplier
			 * that has not completed its probing yet as long as it
			 * knows that the supplier is already functional (for
			 * example, it has just acquired some resources from the
			 * supplier).
			 */
			link->status = DL_STATE_CONSUMER_PROBE;
			break;
		default:
			link->status = DL_STATE_DORMANT;
			break;
		}
		break;
	case DL_DEV_DRIVER_BOUND:
		switch (consumer->links.status) {
		case DL_DEV_PROBING:
			link->status = DL_STATE_CONSUMER_PROBE;
			break;
		case DL_DEV_DRIVER_BOUND:
			link->status = DL_STATE_ACTIVE;
			break;
		default:
			link->status = DL_STATE_AVAILABLE;
			break;
		}
		break;
	case DL_DEV_UNBINDING:
		link->status = DL_STATE_SUPPLIER_UNBIND;
		break;
	default:
		link->status = DL_STATE_DORMANT;
		break;
	}
}

static int device_reorder_to_tail(struct device *dev, void *not_used)
{
	struct device_link *link;

	/*
	 * Devices that have not been registered yet will be put to the ends
	 * of the lists during the registration, so skip them here.
	 */
	if (device_is_registered(dev))
		devices_kset_move_last(dev);

	if (device_pm_initialized(dev))
		device_pm_move_last(dev);

	device_for_each_child(dev, NULL, device_reorder_to_tail);
	list_for_each_entry(link, &dev->links.consumers, s_node) {
		if (link->flags == (DL_FLAG_SYNC_STATE_ONLY | DL_FLAG_MANAGED))
			continue;
		device_reorder_to_tail(link->consumer, NULL);
	}

	return 0;
}

/**
 * device_pm_move_to_tail - Move set of devices to the end of device lists
 * @dev: Device to move
 *
 * This is a device_reorder_to_tail() wrapper taking the requisite locks.
 *
 * It moves the @dev along with all of its children and all of its consumers
 * to the ends of the device_kset and dpm_list, recursively.
 */
void device_pm_move_to_tail(struct device *dev)
{
	int idx;

	idx = device_links_read_lock();
	device_pm_lock();
	device_reorder_to_tail(dev, NULL);
	device_pm_unlock();
	device_links_read_unlock(idx);
}

#define to_devlink(dev)	container_of((dev), struct device_link, link_dev)

static ssize_t status_show(struct device *dev,
			   struct device_attribute *attr, char *buf)
{
	const char *output;

	switch (to_devlink(dev)->status) {
	case DL_STATE_NONE:
		output = "not tracked";
		break;
	case DL_STATE_DORMANT:
		output = "dormant";
		break;
	case DL_STATE_AVAILABLE:
		output = "available";
		break;
	case DL_STATE_CONSUMER_PROBE:
		output = "consumer probing";
		break;
	case DL_STATE_ACTIVE:
		output = "active";
		break;
	case DL_STATE_SUPPLIER_UNBIND:
		output = "supplier unbinding";
		break;
	default:
		output = "unknown";
		break;
	}

	return sysfs_emit(buf, "%s\n", output);
}
static DEVICE_ATTR_RO(status);

static ssize_t auto_remove_on_show(struct device *dev,
				   struct device_attribute *attr, char *buf)
{
	struct device_link *link = to_devlink(dev);
	const char *output;

	if (link->flags & DL_FLAG_AUTOREMOVE_SUPPLIER)
		output = "supplier unbind";
	else if (link->flags & DL_FLAG_AUTOREMOVE_CONSUMER)
		output = "consumer unbind";
	else
		output = "never";

	return sysfs_emit(buf, "%s\n", output);
}
static DEVICE_ATTR_RO(auto_remove_on);

static ssize_t runtime_pm_show(struct device *dev,
			       struct device_attribute *attr, char *buf)
{
	struct device_link *link = to_devlink(dev);

	return sysfs_emit(buf, "%d\n", !!(link->flags & DL_FLAG_PM_RUNTIME));
}
static DEVICE_ATTR_RO(runtime_pm);

static ssize_t sync_state_only_show(struct device *dev,
				    struct device_attribute *attr, char *buf)
{
	struct device_link *link = to_devlink(dev);

	return sysfs_emit(buf, "%d\n",
			  !!(link->flags & DL_FLAG_SYNC_STATE_ONLY));
}
static DEVICE_ATTR_RO(sync_state_only);

static struct attribute *devlink_attrs[] = {
	&dev_attr_status.attr,
	&dev_attr_auto_remove_on.attr,
	&dev_attr_runtime_pm.attr,
	&dev_attr_sync_state_only.attr,
	NULL,
};
ATTRIBUTE_GROUPS(devlink);

static void device_link_free(struct device_link *link)
{
	while (refcount_dec_not_one(&link->rpm_active))
		pm_runtime_put(link->supplier);

	put_device(link->consumer);
	put_device(link->supplier);
	kfree(link);
}

#ifdef CONFIG_SRCU
static void __device_link_free_srcu(struct rcu_head *rhead)
{
	device_link_free(container_of(rhead, struct device_link, rcu_head));
}

static void devlink_dev_release(struct device *dev)
{
	struct device_link *link = to_devlink(dev);

	call_srcu(&device_links_srcu, &link->rcu_head, __device_link_free_srcu);
}
#else
static void devlink_dev_release(struct device *dev)
{
	device_link_free(to_devlink(dev));
}
#endif

static struct class devlink_class = {
	.name = "devlink",
	.owner = THIS_MODULE,
	.dev_groups = devlink_groups,
	.dev_release = devlink_dev_release,
};

static int devlink_add_symlinks(struct device *dev,
				struct class_interface *class_intf)
{
	int ret;
	size_t len;
	struct device_link *link = to_devlink(dev);
	struct device *sup = link->supplier;
	struct device *con = link->consumer;
	char *buf;

	len = max(strlen(dev_name(sup)), strlen(dev_name(con)));
	len += strlen("supplier:") + 1;
	buf = kzalloc(len, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	ret = sysfs_create_link(&link->link_dev.kobj, &sup->kobj, "supplier");
	if (ret)
		goto out;

	ret = sysfs_create_link(&link->link_dev.kobj, &con->kobj, "consumer");
	if (ret)
		goto err_con;

	snprintf(buf, len, "consumer:%s", dev_name(con));
	ret = sysfs_create_link(&sup->kobj, &link->link_dev.kobj, buf);
	if (ret)
		goto err_con_dev;

	snprintf(buf, len, "supplier:%s", dev_name(sup));
	ret = sysfs_create_link(&con->kobj, &link->link_dev.kobj, buf);
	if (ret)
		goto err_sup_dev;

	goto out;

err_sup_dev:
	snprintf(buf, len, "consumer:%s", dev_name(con));
	sysfs_remove_link(&sup->kobj, buf);
err_con_dev:
	sysfs_remove_link(&link->link_dev.kobj, "consumer");
err_con:
	sysfs_remove_link(&link->link_dev.kobj, "supplier");
out:
	kfree(buf);
	return ret;
}

static void devlink_remove_symlinks(struct device *dev,
				    struct class_interface *class_intf)
{
	struct device_link *link = to_devlink(dev);
	size_t len;
	struct device *sup = link->supplier;
	struct device *con = link->consumer;
	char *buf;

	sysfs_remove_link(&link->link_dev.kobj, "consumer");
	sysfs_remove_link(&link->link_dev.kobj, "supplier");

	len = max(strlen(dev_name(sup)), strlen(dev_name(con)));
	len += strlen("supplier:") + 1;
	buf = kzalloc(len, GFP_KERNEL);
	if (!buf) {
		WARN(1, "Unable to properly free device link symlinks!\n");
		return;
	}

	snprintf(buf, len, "supplier:%s", dev_name(sup));
	sysfs_remove_link(&con->kobj, buf);
	snprintf(buf, len, "consumer:%s", dev_name(con));
	sysfs_remove_link(&sup->kobj, buf);
	kfree(buf);
}

static struct class_interface devlink_class_intf = {
	.class = &devlink_class,
	.add_dev = devlink_add_symlinks,
	.remove_dev = devlink_remove_symlinks,
};

static int __init devlink_class_init(void)
{
	int ret;

	ret = class_register(&devlink_class);
	if (ret)
		return ret;

	ret = class_interface_register(&devlink_class_intf);
	if (ret)
		class_unregister(&devlink_class);

	return ret;
}
postcore_initcall(devlink_class_init);

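/*
 * Illustrative sketch, not part of the original file: for a link between a
 * hypothetical supplier "foo.0" and consumer "bar.1", the class and symlinks
 * registered above expose roughly this sysfs layout:
 *
 *	/sys/class/devlink/foo.0--bar.1/supplier  -> the supplier device
 *	/sys/class/devlink/foo.0--bar.1/consumer  -> the consumer device
 *	<supplier dir>/consumer:bar.1             -> the devlink device
 *	<consumer dir>/supplier:foo.0             -> the devlink device
 */
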
#define DL_MANAGED_LINK_FLAGS (DL_FLAG_AUTOREMOVE_CONSUMER | \
			       DL_FLAG_AUTOREMOVE_SUPPLIER | \
			       DL_FLAG_AUTOPROBE_CONSUMER | \
			       DL_FLAG_SYNC_STATE_ONLY)

#define DL_ADD_VALID_FLAGS (DL_MANAGED_LINK_FLAGS | DL_FLAG_STATELESS | \
			    DL_FLAG_PM_RUNTIME | DL_FLAG_RPM_ACTIVE)

/**
 * device_link_add - Create a link between two devices.
 * @consumer: Consumer end of the link.
 * @supplier: Supplier end of the link.
 * @flags: Link flags.
 *
 * The caller is responsible for the proper synchronization of the link creation
 * with runtime PM. First, setting the DL_FLAG_PM_RUNTIME flag will cause the
 * runtime PM framework to take the link into account. Second, if the
 * DL_FLAG_RPM_ACTIVE flag is set in addition to it, the supplier devices will
 * be forced into the active metastate and reference-counted upon the creation
 * of the link. If DL_FLAG_PM_RUNTIME is not set, DL_FLAG_RPM_ACTIVE will be
 * ignored.
 *
 * If DL_FLAG_STATELESS is set in @flags, the caller of this function is
 * expected to release the link returned by it directly with the help of either
 * device_link_del() or device_link_remove().
 *
 * If that flag is not set, however, the caller of this function is handing the
 * management of the link over to the driver core entirely and its return value
 * can only be used to check whether or not the link is present. In that case,
 * the DL_FLAG_AUTOREMOVE_CONSUMER and DL_FLAG_AUTOREMOVE_SUPPLIER device link
 * flags can be used to indicate to the driver core when the link can be safely
 * deleted. Namely, setting one of them in @flags indicates to the driver core
 * that the link is not going to be used (by the given caller of this function)
 * after unbinding the consumer or supplier driver, respectively, from its
 * device, so the link can be deleted at that point. If none of them is set,
 * the link will be maintained until one of the devices pointed to by it (either
 * the consumer or the supplier) is unregistered.
 *
 * Also, if DL_FLAG_STATELESS, DL_FLAG_AUTOREMOVE_CONSUMER and
 * DL_FLAG_AUTOREMOVE_SUPPLIER are not set in @flags (that is, a persistent
 * managed device link is being added), the DL_FLAG_AUTOPROBE_CONSUMER flag can
 * be used to request the driver core to automatically probe for a consumer
 * driver after successfully binding a driver to the supplier device.
 *
 * The combination of DL_FLAG_STATELESS and one of DL_FLAG_AUTOREMOVE_CONSUMER,
 * DL_FLAG_AUTOREMOVE_SUPPLIER, or DL_FLAG_AUTOPROBE_CONSUMER set in @flags at
 * the same time is invalid and will cause NULL to be returned upfront.
 * However, if a device link between the given @consumer and @supplier pair
 * exists already when this function is called for them, the existing link will
 * be returned regardless of its current type and status (the link's flags may
 * be modified then). The caller of this function is then expected to treat
 * the link as though it has just been created, so (in particular) if
 * DL_FLAG_STATELESS was passed in @flags, the link needs to be released
 * explicitly when not needed any more (as stated above).
 *
 * A side effect of the link creation is re-ordering of dpm_list and the
 * devices_kset list by moving the consumer device and all devices depending
 * on it to the ends of these lists (that does not happen to devices that have
 * not been registered when this function is called).
 *
 * The supplier device is required to be registered when this function is called
 * and NULL will be returned if that is not the case. The consumer device need
 * not be registered, however.
 */
struct device_link *device_link_add(struct device *consumer,
				    struct device *supplier, u32 flags)
{
	struct device_link *link;

	if (!consumer || !supplier || flags & ~DL_ADD_VALID_FLAGS ||
	    (flags & DL_FLAG_STATELESS && flags & DL_MANAGED_LINK_FLAGS) ||
	    (flags & DL_FLAG_SYNC_STATE_ONLY &&
	     flags != DL_FLAG_SYNC_STATE_ONLY) ||
	    (flags & DL_FLAG_AUTOPROBE_CONSUMER &&
	     flags & (DL_FLAG_AUTOREMOVE_CONSUMER |
		      DL_FLAG_AUTOREMOVE_SUPPLIER)))
		return NULL;

	if (flags & DL_FLAG_PM_RUNTIME && flags & DL_FLAG_RPM_ACTIVE) {
		if (pm_runtime_get_sync(supplier) < 0) {
			pm_runtime_put_noidle(supplier);
			return NULL;
		}
	}

	if (!(flags & DL_FLAG_STATELESS))
		flags |= DL_FLAG_MANAGED;

	device_links_write_lock();
	device_pm_lock();

	/*
	 * If the supplier has not been fully registered yet or there is a
	 * reverse (non-SYNC_STATE_ONLY) dependency between the consumer and
	 * the supplier already in the graph, return NULL. If the link is a
	 * SYNC_STATE_ONLY link, we don't check for reverse dependencies
	 * because it only affects sync_state() callbacks.
	 */
	if (!device_pm_initialized(supplier)
	    || (!(flags & DL_FLAG_SYNC_STATE_ONLY) &&
		device_is_dependent(consumer, supplier))) {
		link = NULL;
		goto out;
	}

	/*
	 * DL_FLAG_AUTOREMOVE_SUPPLIER indicates that the link will be needed
	 * longer than for DL_FLAG_AUTOREMOVE_CONSUMER and setting them both
	 * together doesn't make sense, so prefer DL_FLAG_AUTOREMOVE_SUPPLIER.
	 */
	if (flags & DL_FLAG_AUTOREMOVE_SUPPLIER)
		flags &= ~DL_FLAG_AUTOREMOVE_CONSUMER;

	list_for_each_entry(link, &supplier->links.consumers, s_node) {
		if (link->consumer != consumer)
			continue;

		if (flags & DL_FLAG_PM_RUNTIME) {
			if (!(link->flags & DL_FLAG_PM_RUNTIME)) {
				pm_runtime_new_link(consumer);
				link->flags |= DL_FLAG_PM_RUNTIME;
			}
			if (flags & DL_FLAG_RPM_ACTIVE)
				refcount_inc(&link->rpm_active);
		}

		if (flags & DL_FLAG_STATELESS) {
			kref_get(&link->kref);
			if (link->flags & DL_FLAG_SYNC_STATE_ONLY &&
			    !(link->flags & DL_FLAG_STATELESS)) {
				link->flags |= DL_FLAG_STATELESS;
				goto reorder;
			} else {
				link->flags |= DL_FLAG_STATELESS;
				goto out;
			}
		}

		/*
		 * If the life time of the link following from the new flags is
		 * longer than indicated by the flags of the existing link,
		 * update the existing link to stay around longer.
		 */
		if (flags & DL_FLAG_AUTOREMOVE_SUPPLIER) {
			if (link->flags & DL_FLAG_AUTOREMOVE_CONSUMER) {
				link->flags &= ~DL_FLAG_AUTOREMOVE_CONSUMER;
				link->flags |= DL_FLAG_AUTOREMOVE_SUPPLIER;
			}
		} else if (!(flags & DL_FLAG_AUTOREMOVE_CONSUMER)) {
			link->flags &= ~(DL_FLAG_AUTOREMOVE_CONSUMER |
					 DL_FLAG_AUTOREMOVE_SUPPLIER);
		}
		if (!(link->flags & DL_FLAG_MANAGED)) {
			kref_get(&link->kref);
			link->flags |= DL_FLAG_MANAGED;
			device_link_init_status(link, consumer, supplier);
		}
		if (link->flags & DL_FLAG_SYNC_STATE_ONLY &&
		    !(flags & DL_FLAG_SYNC_STATE_ONLY)) {
			link->flags &= ~DL_FLAG_SYNC_STATE_ONLY;
			goto reorder;
		}

		goto out;
	}

	link = kzalloc(sizeof(*link), GFP_KERNEL);
	if (!link)
		goto out;

	refcount_set(&link->rpm_active, 1);

	get_device(supplier);
	link->supplier = supplier;
	INIT_LIST_HEAD(&link->s_node);
	get_device(consumer);
	link->consumer = consumer;
	INIT_LIST_HEAD(&link->c_node);
	link->flags = flags;
	kref_init(&link->kref);

	link->link_dev.class = &devlink_class;
	device_set_pm_not_required(&link->link_dev);
	dev_set_name(&link->link_dev, "%s--%s",
		     dev_name(supplier), dev_name(consumer));
	if (device_register(&link->link_dev)) {
		put_device(consumer);
		put_device(supplier);
		kfree(link);
		link = NULL;
		goto out;
	}

	if (flags & DL_FLAG_PM_RUNTIME) {
		if (flags & DL_FLAG_RPM_ACTIVE)
			refcount_inc(&link->rpm_active);

		pm_runtime_new_link(consumer);
	}

	/* Determine the initial link state. */
	if (flags & DL_FLAG_STATELESS)
		link->status = DL_STATE_NONE;
	else
		device_link_init_status(link, consumer, supplier);

	/*
	 * Some callers expect the link creation during consumer driver probe to
	 * resume the supplier even without DL_FLAG_RPM_ACTIVE.
	 */
	if (link->status == DL_STATE_CONSUMER_PROBE &&
	    flags & DL_FLAG_PM_RUNTIME)
		pm_runtime_resume(supplier);

	list_add_tail_rcu(&link->s_node, &supplier->links.consumers);
	list_add_tail_rcu(&link->c_node, &consumer->links.suppliers);

	if (flags & DL_FLAG_SYNC_STATE_ONLY) {
		dev_dbg(consumer,
			"Linked as a sync state only consumer to %s\n",
			dev_name(supplier));
		goto out;
	}

reorder:
	/*
	 * Move the consumer and all of the devices depending on it to the end
	 * of dpm_list and the devices_kset list.
	 *
	 * It is necessary to hold dpm_list locked throughout all that or else
	 * we may end up suspending with a wrong ordering of it.
	 */
	device_reorder_to_tail(consumer, NULL);

	dev_dbg(consumer, "Linked as a consumer to %s\n", dev_name(supplier));

out:
	device_pm_unlock();
	device_links_write_unlock();

	if ((flags & DL_FLAG_PM_RUNTIME && flags & DL_FLAG_RPM_ACTIVE) && !link)
		pm_runtime_put(supplier);

	return link;
}
EXPORT_SYMBOL_GPL(device_link_add);

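/*
 * Illustrative sketch, not part of the original file: a consumer driver's
 * probe routine might tie itself to an already registered supplier (both
 * device pointers here are hypothetical) with a managed, runtime-PM-aware
 * link:
 *
 *	struct device_link *link;
 *
 *	link = device_link_add(consumer_dev, supplier_dev,
 *			       DL_FLAG_AUTOREMOVE_CONSUMER | DL_FLAG_PM_RUNTIME);
 *	if (!link)
 *		return -EINVAL;
 *
 * With DL_FLAG_AUTOREMOVE_CONSUMER the driver core drops the link again when
 * the consumer driver unbinds, so no explicit deletion is needed.
 */
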
/**
 * device_link_wait_for_supplier - Add device to wait_for_suppliers list
 * @consumer: Consumer device
 *
 * Marks the @consumer device as waiting for suppliers to become available by
 * adding it to the wait_for_suppliers list. The consumer device will never be
 * probed until it's removed from the wait_for_suppliers list.
 *
 * The caller is responsible for adding the links to the supplier devices once
 * they are available and removing the @consumer device from the
 * wait_for_suppliers list once links to all the suppliers have been created.
 *
 * This function is NOT meant to be called from the probe function of the
 * consumer but rather from code that creates/adds the consumer device.
 */
static void device_link_wait_for_supplier(struct device *consumer,
					  bool need_for_probe)
{
	mutex_lock(&wfs_lock);
	list_add_tail(&consumer->links.needs_suppliers, &wait_for_suppliers);
	consumer->links.need_for_probe = need_for_probe;
	mutex_unlock(&wfs_lock);
}

static void device_link_wait_for_mandatory_supplier(struct device *consumer)
{
	device_link_wait_for_supplier(consumer, true);
}

static void device_link_wait_for_optional_supplier(struct device *consumer)
{
	device_link_wait_for_supplier(consumer, false);
}

/**
 * device_link_add_missing_supplier_links - Add links from consumer devices to
 *					    supplier devices, leaving any
 *					    consumer with inactive suppliers on
 *					    the wait_for_suppliers list
 *
 * Loops through all consumers waiting on suppliers and tries to add all their
 * supplier links. If that succeeds, the consumer device is removed from
 * wait_for_suppliers list. Otherwise, they are left in the wait_for_suppliers
 * list. Devices left on the wait_for_suppliers list will not be probed.
 *
 * The fwnode add_links callback is expected to return 0 if it has found and
 * added all the supplier links for the consumer device. It should return an
 * error if it isn't able to do so.
 *
 * The caller of device_link_wait_for_supplier() is expected to call this once
 * it's aware of potential suppliers becoming available.
 */
static void device_link_add_missing_supplier_links(void)
{
	struct device *dev, *tmp;

	mutex_lock(&wfs_lock);
	list_for_each_entry_safe(dev, tmp, &wait_for_suppliers,
				 links.needs_suppliers) {
		int ret = fwnode_call_int_op(dev->fwnode, add_links, dev);
		if (!ret)
			list_del_init(&dev->links.needs_suppliers);
		else if (ret != -ENODEV)
			dev->links.need_for_probe = false;
	}
	mutex_unlock(&wfs_lock);
}

#ifdef CONFIG_SRCU
static void __device_link_del(struct kref *kref)
{
	struct device_link *link = container_of(kref, struct device_link, kref);

	dev_dbg(link->consumer, "Dropping the link to %s\n",
		dev_name(link->supplier));

	pm_runtime_drop_link(link);

	list_del_rcu(&link->s_node);
	list_del_rcu(&link->c_node);
	device_unregister(&link->link_dev);
}
#else /* !CONFIG_SRCU */
static void __device_link_del(struct kref *kref)
{
	struct device_link *link = container_of(kref, struct device_link, kref);

	dev_info(link->consumer, "Dropping the link to %s\n",
		 dev_name(link->supplier));

	pm_runtime_drop_link(link);

	list_del(&link->s_node);
	list_del(&link->c_node);
	device_unregister(&link->link_dev);
}
#endif /* !CONFIG_SRCU */

static void device_link_put_kref(struct device_link *link)
{
	if (link->flags & DL_FLAG_STATELESS)
		kref_put(&link->kref, __device_link_del);
	else
		WARN(1, "Unable to drop a managed device link reference\n");
}

/**
 * device_link_del - Delete a stateless link between two devices.
 * @link: Device link to delete.
 *
 * The caller must ensure proper synchronization of this function with runtime
 * PM. If the link was added multiple times, it needs to be deleted as often.
 * Care is required for hotplugged devices: Their links are purged on removal
 * and calling device_link_del() is then no longer allowed.
 */
void device_link_del(struct device_link *link)
{
	device_links_write_lock();
	device_link_put_kref(link);
	device_links_write_unlock();
}
EXPORT_SYMBOL_GPL(device_link_del);

/**
 * device_link_remove - Delete a stateless link between two devices.
 * @consumer: Consumer end of the link.
 * @supplier: Supplier end of the link.
 *
 * The caller must ensure proper synchronization of this function with runtime
 * PM.
 */
void device_link_remove(void *consumer, struct device *supplier)
{
	struct device_link *link;

	if (WARN_ON(consumer == supplier))
		return;

	device_links_write_lock();

	list_for_each_entry(link, &supplier->links.consumers, s_node) {
		if (link->consumer == consumer) {
			device_link_put_kref(link);
			break;
		}
	}

	device_links_write_unlock();
}
EXPORT_SYMBOL_GPL(device_link_remove);

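/*
 * Illustrative sketch, not part of the original file: a stateless link is
 * paired with an explicit release, either by pointer or by endpoint pair
 * (consumer_dev and supplier_dev are hypothetical):
 *
 *	link = device_link_add(consumer_dev, supplier_dev, DL_FLAG_STATELESS);
 *	...
 *	device_link_del(link);
 *	// or, if the pointer was not kept around:
 *	device_link_remove(consumer_dev, supplier_dev);
 */
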
static void device_links_missing_supplier(struct device *dev)
{
	struct device_link *link;

	list_for_each_entry(link, &dev->links.suppliers, c_node) {
		if (link->status != DL_STATE_CONSUMER_PROBE)
			continue;

		if (link->supplier->links.status == DL_DEV_DRIVER_BOUND) {
			WRITE_ONCE(link->status, DL_STATE_AVAILABLE);
		} else {
			WARN_ON(!(link->flags & DL_FLAG_SYNC_STATE_ONLY));
			WRITE_ONCE(link->status, DL_STATE_DORMANT);
		}
	}
}

/**
 * device_links_check_suppliers - Check presence of supplier drivers.
 * @dev: Consumer device.
 *
 * Check links from this device to any suppliers. Walk the list of the device's
 * links to suppliers and see if all of them are available. If not, simply
 * return -EPROBE_DEFER.
 *
 * We need to guarantee that the supplier will not go away after the check has
 * been positive here. It only can go away in __device_release_driver() and
 * that function checks the device's links to consumers. This means we need to
 * mark the link as "consumer probe in progress" to make the supplier removal
 * wait for us to complete (or bad things may happen).
 *
 * Links without the DL_FLAG_MANAGED flag set are ignored.
 */
int device_links_check_suppliers(struct device *dev)
{
	struct device_link *link;
	int ret = 0;

	/*
	 * Device waiting for supplier to become available is not allowed to
	 * probe.
	 */
	mutex_lock(&wfs_lock);
	if (!list_empty(&dev->links.needs_suppliers) &&
	    dev->links.need_for_probe) {
		mutex_unlock(&wfs_lock);
		return -EPROBE_DEFER;
	}
	mutex_unlock(&wfs_lock);

	device_links_write_lock();

	list_for_each_entry(link, &dev->links.suppliers, c_node) {
		if (!(link->flags & DL_FLAG_MANAGED))
			continue;

		if (link->status != DL_STATE_AVAILABLE &&
		    !(link->flags & DL_FLAG_SYNC_STATE_ONLY)) {
			device_links_missing_supplier(dev);
			ret = -EPROBE_DEFER;
			break;
		}
		WRITE_ONCE(link->status, DL_STATE_CONSUMER_PROBE);
	}
	dev->links.status = DL_DEV_PROBING;

	device_links_write_unlock();
	return ret;
}

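/*
 * Illustrative sketch, not part of the original file: the driver core's probe
 * path is expected to consult this before binding a driver and to defer the
 * probe when suppliers are not ready, roughly:
 *
 *	ret = device_links_check_suppliers(dev);
 *	if (ret)
 *		return ret;	// usually -EPROBE_DEFER, retried later
 */
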
/**
 * __device_links_queue_sync_state - Queue a device for sync_state() callback
 * @dev: Device to call sync_state() on
 * @list: List head to queue the @dev on
 *
 * Queues a device for a sync_state() callback when the device links write lock
 * isn't held. This allows the sync_state() execution flow to use device links
 * APIs. The caller must ensure this function is called with
 * device_links_write_lock() held.
 *
 * This function does a get_device() to make sure the device is not freed while
 * on this list.
 *
 * So the caller must also ensure that device_links_flush_sync_list() is called
 * as soon as the caller releases device_links_write_lock(). This is necessary
 * to make sure the sync_state() is called in a timely fashion and the
 * put_device() is called on this device.
 */
static void __device_links_queue_sync_state(struct device *dev,
					    struct list_head *list)
{
	struct device_link *link;

	if (!dev_has_sync_state(dev))
		return;
	if (dev->state_synced)
		return;

	list_for_each_entry(link, &dev->links.consumers, s_node) {
		if (!(link->flags & DL_FLAG_MANAGED))
			continue;
		if (link->status != DL_STATE_ACTIVE)
			return;
	}

	/*
	 * Set the flag here to avoid adding the same device to a list more
	 * than once. This can happen if new consumers get added to the device
	 * and probed before the list is flushed.
	 */
	dev->state_synced = true;

	if (WARN_ON(!list_empty(&dev->links.defer_sync)))
		return;

	get_device(dev);
	list_add_tail(&dev->links.defer_sync, list);
}

/**
 * device_links_flush_sync_list - Call sync_state() on a list of devices
 * @list: List of devices to call sync_state() on
 * @dont_lock_dev: Device for which lock is already held by the caller
 *
 * Calls sync_state() on all the devices that have been queued for it. This
 * function is used in conjunction with __device_links_queue_sync_state(). The
 * @dont_lock_dev parameter is useful when this function is called from a
 * context where a device lock is already held.
 */
static void device_links_flush_sync_list(struct list_head *list,
					 struct device *dont_lock_dev)
{
	struct device *dev, *tmp;

	list_for_each_entry_safe(dev, tmp, list, links.defer_sync) {
		list_del_init(&dev->links.defer_sync);

		if (dev != dont_lock_dev)
			device_lock(dev);

		if (dev->bus->sync_state)
			dev->bus->sync_state(dev);
		else if (dev->driver && dev->driver->sync_state)
			dev->driver->sync_state(dev);

		if (dev != dont_lock_dev)
			device_unlock(dev);

		put_device(dev);
	}
}

void device_links_supplier_sync_state_pause(void)
{
	device_links_write_lock();
	defer_sync_state_count++;
	device_links_write_unlock();
}

void device_links_supplier_sync_state_resume(void)
{
	struct device *dev, *tmp;
	LIST_HEAD(sync_list);

	device_links_write_lock();
	if (!defer_sync_state_count) {
		WARN(true, "Unmatched sync_state pause/resume!");
		goto out;
	}
	defer_sync_state_count--;
	if (defer_sync_state_count)
		goto out;

	list_for_each_entry_safe(dev, tmp, &deferred_sync, links.defer_sync) {
		/*
		 * Delete from deferred_sync list before queuing it to
		 * sync_list because defer_sync is used for both lists.
		 */
		list_del_init(&dev->links.defer_sync);
		__device_links_queue_sync_state(dev, &sync_list);
	}
out:
	device_links_write_unlock();

	device_links_flush_sync_list(&sync_list, NULL);
}

static int sync_state_resume_initcall(void)
{
	device_links_supplier_sync_state_resume();
	return 0;
}
late_initcall(sync_state_resume_initcall);

static void __device_links_supplier_defer_sync(struct device *sup)
{
	if (list_empty(&sup->links.defer_sync) && dev_has_sync_state(sup))
		list_add_tail(&sup->links.defer_sync, &deferred_sync);
}

static void device_link_drop_managed(struct device_link *link)
{
	link->flags &= ~DL_FLAG_MANAGED;
	WRITE_ONCE(link->status, DL_STATE_NONE);
	kref_put(&link->kref, __device_link_del);
}

static ssize_t waiting_for_supplier_show(struct device *dev,
					 struct device_attribute *attr,
					 char *buf)
{
	bool val;

	device_lock(dev);
	val = !list_empty(&dev->links.needs_suppliers)
	      && dev->links.need_for_probe;
	device_unlock(dev);
	return sysfs_emit(buf, "%u\n", val);
}
static DEVICE_ATTR_RO(waiting_for_supplier);

/**
 * device_links_driver_bound - Update device links after probing its driver.
 * @dev: Device to update the links for.
 *
 * The probe has been successful, so update links from this device to any
 * consumers by changing their status to "available".
 *
 * Also change the status of @dev's links to suppliers to "active".
 *
 * Links without the DL_FLAG_MANAGED flag set are ignored.
 */
void device_links_driver_bound(struct device *dev)
{
	struct device_link *link, *ln;
	LIST_HEAD(sync_list);

	/*
	 * If a device probes successfully, it's expected to have created all
	 * the device links it needs to or make new device links as it needs
	 * them. So, it no longer needs to wait on any suppliers.
	 */
	mutex_lock(&wfs_lock);
	list_del_init(&dev->links.needs_suppliers);
	mutex_unlock(&wfs_lock);
	device_remove_file(dev, &dev_attr_waiting_for_supplier);

	device_links_write_lock();

	list_for_each_entry(link, &dev->links.consumers, s_node) {
		if (!(link->flags & DL_FLAG_MANAGED))
			continue;

		/*
		 * Links created during consumer probe may be in the "consumer
		 * probe" state to start with if the supplier is still probing
		 * when they are created and they may become "active" if the
		 * consumer probe returns first. Skip them here.
		 */
		if (link->status == DL_STATE_CONSUMER_PROBE ||
		    link->status == DL_STATE_ACTIVE)
			continue;

		WARN_ON(link->status != DL_STATE_DORMANT);
		WRITE_ONCE(link->status, DL_STATE_AVAILABLE);

		if (link->flags & DL_FLAG_AUTOPROBE_CONSUMER)
			driver_deferred_probe_add(link->consumer);
	}

	if (defer_sync_state_count)
		__device_links_supplier_defer_sync(dev);
	else
		__device_links_queue_sync_state(dev, &sync_list);

	list_for_each_entry_safe(link, ln, &dev->links.suppliers, c_node) {
		struct device *supplier;

		if (!(link->flags & DL_FLAG_MANAGED))
			continue;

		supplier = link->supplier;
		if (link->flags & DL_FLAG_SYNC_STATE_ONLY) {
			/*
			 * When DL_FLAG_SYNC_STATE_ONLY is set, it means no
			 * other DL_MANAGED_LINK_FLAGS have been set. So, it's
			 * safe to drop the managed link completely.
			 */
			device_link_drop_managed(link);
		} else {
			WARN_ON(link->status != DL_STATE_CONSUMER_PROBE);
			WRITE_ONCE(link->status, DL_STATE_ACTIVE);
		}

		/*
		 * This needs to be done even for the deleted
		 * DL_FLAG_SYNC_STATE_ONLY device link in case it was the last
		 * device link that was preventing the supplier from getting a
		 * sync_state() call.
		 */
		if (defer_sync_state_count)
			__device_links_supplier_defer_sync(supplier);
		else
			__device_links_queue_sync_state(supplier, &sync_list);
	}

	dev->links.status = DL_DEV_DRIVER_BOUND;

	device_links_write_unlock();

	device_links_flush_sync_list(&sync_list, dev);
}

/**
 * __device_links_no_driver - Update links of a device without a driver.
 * @dev: Device without a driver.
 *
 * Delete all non-persistent links from this device to any suppliers.
 *
 * Persistent links stay around, but their status is changed to "available",
 * unless they already are in the "supplier unbind in progress" state in which
 * case they need not be updated.
 *
 * Links without the DL_FLAG_MANAGED flag set are ignored.
 */
static void __device_links_no_driver(struct device *dev)
{
	struct device_link *link, *ln;

	list_for_each_entry_safe_reverse(link, ln, &dev->links.suppliers, c_node) {
		if (!(link->flags & DL_FLAG_MANAGED))
			continue;

		if (link->flags & DL_FLAG_AUTOREMOVE_CONSUMER) {
			device_link_drop_managed(link);
			continue;
		}

		if (link->status != DL_STATE_CONSUMER_PROBE &&
		    link->status != DL_STATE_ACTIVE)
			continue;

		if (link->supplier->links.status == DL_DEV_DRIVER_BOUND) {
			WRITE_ONCE(link->status, DL_STATE_AVAILABLE);
		} else {
			WARN_ON(!(link->flags & DL_FLAG_SYNC_STATE_ONLY));
			WRITE_ONCE(link->status, DL_STATE_DORMANT);
		}
	}

	dev->links.status = DL_DEV_NO_DRIVER;
}

/**
 * device_links_no_driver - Update links after failing driver probe.
 * @dev: Device whose driver has just failed to probe.
 *
 * Clean up leftover links to consumers for @dev and invoke
 * %__device_links_no_driver() to update links to suppliers for it as
 * appropriate.
 *
 * Links without the DL_FLAG_MANAGED flag set are ignored.
 */
void device_links_no_driver(struct device *dev)
{
	struct device_link *link;

	device_links_write_lock();

	list_for_each_entry(link, &dev->links.consumers, s_node) {
		if (!(link->flags & DL_FLAG_MANAGED))
			continue;

		/*
		 * The probe has failed, so if the status of the link is
		 * "consumer probe" or "active", it must have been added by
		 * a probing consumer while this device was still probing.
		 * Change its state to "dormant", as it represents a valid
		 * relationship, but it is not functionally meaningful.
		 */
		if (link->status == DL_STATE_CONSUMER_PROBE ||
		    link->status == DL_STATE_ACTIVE)
			WRITE_ONCE(link->status, DL_STATE_DORMANT);
	}

	__device_links_no_driver(dev);

	device_links_write_unlock();
}

/**
 * device_links_driver_cleanup - Update links after driver removal.
 * @dev: Device whose driver has just gone away.
 *
 * Update links to consumers for @dev by changing their status to "dormant" and
 * invoke %__device_links_no_driver() to update links to suppliers for it as
 * appropriate.
 *
 * Links without the DL_FLAG_MANAGED flag set are ignored.
 */
void device_links_driver_cleanup(struct device *dev)
{
	struct device_link *link, *ln;

	device_links_write_lock();

	list_for_each_entry_safe(link, ln, &dev->links.consumers, s_node) {
		if (!(link->flags & DL_FLAG_MANAGED))
			continue;

		WARN_ON(link->flags & DL_FLAG_AUTOREMOVE_CONSUMER);
		WARN_ON(link->status != DL_STATE_SUPPLIER_UNBIND);

		/*
		 * autoremove the links between this @dev and its consumer
		 * devices that are not active, i.e. where the link state
		 * has moved to DL_STATE_SUPPLIER_UNBIND.
		 */
		if (link->status == DL_STATE_SUPPLIER_UNBIND &&
		    link->flags & DL_FLAG_AUTOREMOVE_SUPPLIER)
			device_link_drop_managed(link);

		WRITE_ONCE(link->status, DL_STATE_DORMANT);
	}

	list_del_init(&dev->links.defer_sync);
	__device_links_no_driver(dev);

	device_links_write_unlock();
}

/**
 * device_links_busy - Check if there are any busy links to consumers.
 * @dev: Device to check.
 *
 * Check each consumer of the device and return 'true' if its link's status
 * is one of "consumer probe" or "active" (meaning that the given consumer is
 * probing right now or its driver is present). Otherwise, change the link
 * state to "supplier unbind" to prevent the consumer from being probed
 * successfully going forward.
 *
 * Return 'false' if there are no probing or active consumers.
 *
 * Links without the DL_FLAG_MANAGED flag set are ignored.
 */
bool device_links_busy(struct device *dev)
{
	struct device_link *link;
	bool ret = false;

	device_links_write_lock();

	list_for_each_entry(link, &dev->links.consumers, s_node) {
		if (!(link->flags & DL_FLAG_MANAGED))
			continue;

		if (link->status == DL_STATE_CONSUMER_PROBE
		    || link->status == DL_STATE_ACTIVE) {
			ret = true;
			break;
		}
		WRITE_ONCE(link->status, DL_STATE_SUPPLIER_UNBIND);
	}

	dev->links.status = DL_DEV_UNBINDING;

	device_links_write_unlock();
	return ret;
}

/**
 * device_links_unbind_consumers - Force unbind consumers of the given device.
 * @dev: Device to unbind the consumers of.
 *
 * Walk the list of links to consumers for @dev and if any of them is in the
 * "consumer probe" state, wait for all device probes in progress to complete
 * and start over.
 *
 * If that's not the case, change the status of the link to "supplier unbind"
 * and check if the link was in the "active" state. If so, force the consumer
 * driver to unbind and start over (the consumer will not re-probe as we have
 * changed the state of the link already).
 *
 * Links without the DL_FLAG_MANAGED flag set are ignored.
 */
void device_links_unbind_consumers(struct device *dev)
{
	struct device_link *link;

 start:
	device_links_write_lock();

	list_for_each_entry(link, &dev->links.consumers, s_node) {
		enum device_link_state status;

		if (!(link->flags & DL_FLAG_MANAGED) ||
		    link->flags & DL_FLAG_SYNC_STATE_ONLY)
			continue;

		status = link->status;
		if (status == DL_STATE_CONSUMER_PROBE) {
			device_links_write_unlock();

			wait_for_device_probe();
			goto start;
		}
		WRITE_ONCE(link->status, DL_STATE_SUPPLIER_UNBIND);
		if (status == DL_STATE_ACTIVE) {
			struct device *consumer = link->consumer;

			get_device(consumer);

			device_links_write_unlock();

			device_release_driver_internal(consumer, NULL,
						       consumer->parent);
			put_device(consumer);
			goto start;
		}
	}

	device_links_write_unlock();
}

/**
 * device_links_purge - Delete existing links to other devices.
 * @dev: Target device.
 */
static void device_links_purge(struct device *dev)
{
	struct device_link *link, *ln;

	if (dev->class == &devlink_class)
		return;

	mutex_lock(&wfs_lock);
	list_del_init(&dev->links.needs_suppliers);
	mutex_unlock(&wfs_lock);

	/*
	 * Delete all of the remaining links from this device to any other
	 * devices (either consumers or suppliers).
	 */
	device_links_write_lock();

	list_for_each_entry_safe_reverse(link, ln, &dev->links.suppliers, c_node) {
		WARN_ON(link->status == DL_STATE_ACTIVE);
		__device_link_del(&link->kref);
	}

	list_for_each_entry_safe_reverse(link, ln, &dev->links.consumers, s_node) {
		WARN_ON(link->status != DL_STATE_DORMANT &&
			link->status != DL_STATE_NONE);
		__device_link_del(&link->kref);
	}

	device_links_write_unlock();
}

static u32 fw_devlink_flags = DL_FLAG_SYNC_STATE_ONLY;
static int __init fw_devlink_setup(char *arg)
{
	if (!arg)
		return -EINVAL;

	if (strcmp(arg, "off") == 0) {
		fw_devlink_flags = 0;
	} else if (strcmp(arg, "permissive") == 0) {
		fw_devlink_flags = DL_FLAG_SYNC_STATE_ONLY;
	} else if (strcmp(arg, "on") == 0) {
		fw_devlink_flags = DL_FLAG_AUTOPROBE_CONSUMER;
	} else if (strcmp(arg, "rpm") == 0) {
		fw_devlink_flags = DL_FLAG_AUTOPROBE_CONSUMER |
				   DL_FLAG_PM_RUNTIME;
	}
	return 0;
}
early_param("fw_devlink", fw_devlink_setup);

1524 | u32 fw_devlink_get_flags(void) | |
1525 | { | |
1526 | return fw_devlink_flags; | |
1527 | } | |
1528 | ||
1529 | static bool fw_devlink_is_permissive(void) | |
1530 | { | |
1531 | return fw_devlink_flags == DL_FLAG_SYNC_STATE_ONLY; | |
1532 | } | |
1533 | ||
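/*
 * Illustrative sketch (editor's addition, not part of this file): the flags
 * chosen by the fw_devlink= parameter above are plain DL_FLAG_* values, i.e.
 * exactly what a firmware parser would hand to device_link_add().  The
 * @consumer/@supplier pointers here are hypothetical placeholders.
 */
static inline struct device_link *
example_fw_devlink_link(struct device *consumer, struct device *supplier)
{
	/* e.g. DL_FLAG_SYNC_STATE_ONLY ("permissive") or
	 * DL_FLAG_AUTOPROBE_CONSUMER ("on") */
	return device_link_add(consumer, supplier, fw_devlink_get_flags());
}
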
5f5377ea SK |
1534 | static void fw_devlink_link_device(struct device *dev) |
1535 | { | |
1536 | int fw_ret; | |
1537 | ||
c84b9090 | 1538 | device_link_add_missing_supplier_links(); |
716a7a25 | 1539 | |
c84b9090 | 1540 | if (fw_devlink_flags && fwnode_has_op(dev->fwnode, add_links)) { |
5f5377ea | 1541 | fw_ret = fwnode_call_int_op(dev->fwnode, add_links, dev); |
c84b9090 SK |
1542 | if (fw_ret == -ENODEV && !fw_devlink_is_permissive()) |
1543 | device_link_wait_for_mandatory_supplier(dev); | |
1544 | else if (fw_ret) | |
1545 | device_link_wait_for_optional_supplier(dev); | |
5f5377ea SK |
1546 | } |
1547 | } | |
1548 | ||
9ed98953 RW |
1549 | /* Device links support end. */ |
1550 | ||
4a3ad20c GKH |
1551 | int (*platform_notify)(struct device *dev) = NULL; |
1552 | int (*platform_notify_remove)(struct device *dev) = NULL; | |
e105b8bf DW |
1553 | static struct kobject *dev_kobj; |
1554 | struct kobject *sysfs_dev_char_kobj; | |
1555 | struct kobject *sysfs_dev_block_kobj; | |
1da177e4 | 1556 | |
5e33bc41 RW |
1557 | static DEFINE_MUTEX(device_hotplug_lock); |
1558 | ||
1559 | void lock_device_hotplug(void) | |
1560 | { | |
1561 | mutex_lock(&device_hotplug_lock); | |
1562 | } | |
1563 | ||
1564 | void unlock_device_hotplug(void) | |
1565 | { | |
1566 | mutex_unlock(&device_hotplug_lock); | |
1567 | } | |
1568 | ||
1569 | int lock_device_hotplug_sysfs(void) | |
1570 | { | |
1571 | if (mutex_trylock(&device_hotplug_lock)) | |
1572 | return 0; | |
1573 | ||
1574 | /* Avoid busy looping (5 ms of sleep should do). */ | |
1575 | msleep(5); | |
1576 | return restart_syscall(); | |
1577 | } | |
1578 | ||
4e886c29 GKH |
1579 | #ifdef CONFIG_BLOCK |
1580 | static inline int device_is_not_partition(struct device *dev) | |
1581 | { | |
1582 | return !(dev->type == &part_type); | |
1583 | } | |
1584 | #else | |
1585 | static inline int device_is_not_partition(struct device *dev) | |
1586 | { | |
1587 | return 1; | |
1588 | } | |
1589 | #endif | |
1da177e4 | 1590 | |
07de0e86 HK |
1591 | static int |
1592 | device_platform_notify(struct device *dev, enum kobject_action action) | |
1593 | { | |
7847a145 HK |
1594 | int ret; |
1595 | ||
1596 | ret = acpi_platform_notify(dev, action); | |
1597 | if (ret) | |
1598 | return ret; | |
1599 | ||
59abd836 HK |
1600 | ret = software_node_notify(dev, action); |
1601 | if (ret) | |
1602 | return ret; | |
1603 | ||
07de0e86 HK |
1604 | if (platform_notify && action == KOBJ_ADD) |
1605 | platform_notify(dev); | |
1606 | else if (platform_notify_remove && action == KOBJ_REMOVE) | |
1607 | platform_notify_remove(dev); | |
1608 | return 0; | |
1609 | } | |
1610 | ||
3e95637a AS |
1611 | /** |
1612 | * dev_driver_string - Return a device's driver name, if at all possible | |
1613 | * @dev: struct device to get the name of | |
1614 | * | |
1615 | * Will return the name of the device's driver if one is bound to the device. If
9169c012 | 1616 | * the device is not bound to a driver, it will return the name of the bus |
3e95637a AS |
1617 | * it is attached to. If it is not attached to a bus either, an empty |
1618 | * string will be returned. | |
1619 | */ | |
bf9ca69f | 1620 | const char *dev_driver_string(const struct device *dev) |
3e95637a | 1621 | { |
3589972e AS |
1622 | struct device_driver *drv; |
1623 | ||
1624 | /* dev->driver can change to NULL underneath us because of unbinding, | |
1625 | * so be careful about accessing it. dev->bus and dev->class should | |
1626 | * never change once they are set, so they don't need special care. | |
1627 | */ | |
6aa7de05 | 1628 | drv = READ_ONCE(dev->driver); |
3589972e | 1629 | return drv ? drv->name : |
a456b702 JD |
1630 | (dev->bus ? dev->bus->name : |
1631 | (dev->class ? dev->class->name : "")); | |
3e95637a | 1632 | } |
310a922d | 1633 | EXPORT_SYMBOL(dev_driver_string); |
3e95637a | 1634 | |
1da177e4 LT |
1635 | #define to_dev_attr(_attr) container_of(_attr, struct device_attribute, attr) |
1636 | ||
4a3ad20c GKH |
1637 | static ssize_t dev_attr_show(struct kobject *kobj, struct attribute *attr, |
1638 | char *buf) | |
1da177e4 | 1639 | { |
4a3ad20c | 1640 | struct device_attribute *dev_attr = to_dev_attr(attr); |
b0d1f807 | 1641 | struct device *dev = kobj_to_dev(kobj); |
4a0c20bf | 1642 | ssize_t ret = -EIO; |
1da177e4 LT |
1643 | |
1644 | if (dev_attr->show) | |
54b6f35c | 1645 | ret = dev_attr->show(dev, dev_attr, buf); |
815d2d50 | 1646 | if (ret >= (ssize_t)PAGE_SIZE) { |
a52668c6 SS |
1647 | printk("dev_attr_show: %pS returned bad count\n", |
1648 | dev_attr->show); | |
815d2d50 | 1649 | } |
1da177e4 LT |
1650 | return ret; |
1651 | } | |
1652 | ||
4a3ad20c GKH |
1653 | static ssize_t dev_attr_store(struct kobject *kobj, struct attribute *attr, |
1654 | const char *buf, size_t count) | |
1da177e4 | 1655 | { |
4a3ad20c | 1656 | struct device_attribute *dev_attr = to_dev_attr(attr); |
b0d1f807 | 1657 | struct device *dev = kobj_to_dev(kobj); |
4a0c20bf | 1658 | ssize_t ret = -EIO; |
1da177e4 LT |
1659 | |
1660 | if (dev_attr->store) | |
54b6f35c | 1661 | ret = dev_attr->store(dev, dev_attr, buf, count); |
1da177e4 LT |
1662 | return ret; |
1663 | } | |
1664 | ||
52cf25d0 | 1665 | static const struct sysfs_ops dev_sysfs_ops = { |
1da177e4 LT |
1666 | .show = dev_attr_show, |
1667 | .store = dev_attr_store, | |
1668 | }; | |
1669 | ||
ca22e56d KS |
1670 | #define to_ext_attr(x) container_of(x, struct dev_ext_attribute, attr) |
1671 | ||
1672 | ssize_t device_store_ulong(struct device *dev, | |
1673 | struct device_attribute *attr, | |
1674 | const char *buf, size_t size) | |
1675 | { | |
1676 | struct dev_ext_attribute *ea = to_ext_attr(attr); | |
f88184bf K |
1677 | int ret; |
1678 | unsigned long new; | |
1679 | ||
1680 | ret = kstrtoul(buf, 0, &new); | |
1681 | if (ret) | |
1682 | return ret; | |
ca22e56d KS |
1683 | *(unsigned long *)(ea->var) = new; |
1684 | /* Always return full write size even if we didn't consume all */ | |
1685 | return size; | |
1686 | } | |
1687 | EXPORT_SYMBOL_GPL(device_store_ulong); | |
1688 | ||
1689 | ssize_t device_show_ulong(struct device *dev, | |
1690 | struct device_attribute *attr, | |
1691 | char *buf) | |
1692 | { | |
1693 | struct dev_ext_attribute *ea = to_ext_attr(attr); | |
aa838896 | 1694 | return sysfs_emit(buf, "%lx\n", *(unsigned long *)(ea->var)); |
ca22e56d KS |
1695 | } |
1696 | EXPORT_SYMBOL_GPL(device_show_ulong); | |
1697 | ||
1698 | ssize_t device_store_int(struct device *dev, | |
1699 | struct device_attribute *attr, | |
1700 | const char *buf, size_t size) | |
1701 | { | |
1702 | struct dev_ext_attribute *ea = to_ext_attr(attr); | |
f88184bf K |
1703 | int ret; |
1704 | long new; | |
1705 | ||
1706 | ret = kstrtol(buf, 0, &new); | |
1707 | if (ret) | |
1708 | return ret; | |
1709 | ||
1710 | if (new > INT_MAX || new < INT_MIN) | |
ca22e56d KS |
1711 | return -EINVAL; |
1712 | *(int *)(ea->var) = new; | |
1713 | /* Always return full write size even if we didn't consume all */ | |
1714 | return size; | |
1715 | } | |
1716 | EXPORT_SYMBOL_GPL(device_store_int); | |
1717 | ||
1718 | ssize_t device_show_int(struct device *dev, | |
1719 | struct device_attribute *attr, | |
1720 | char *buf) | |
1721 | { | |
1722 | struct dev_ext_attribute *ea = to_ext_attr(attr); | |
1723 | ||
aa838896 | 1724 | return sysfs_emit(buf, "%d\n", *(int *)(ea->var)); |
ca22e56d KS |
1725 | } |
1726 | EXPORT_SYMBOL_GPL(device_show_int); | |
1da177e4 | 1727 | |
91872392 BP |
1728 | ssize_t device_store_bool(struct device *dev, struct device_attribute *attr, |
1729 | const char *buf, size_t size) | |
1730 | { | |
1731 | struct dev_ext_attribute *ea = to_ext_attr(attr); | |
1732 | ||
1733 | if (strtobool(buf, ea->var) < 0) | |
1734 | return -EINVAL; | |
1735 | ||
1736 | return size; | |
1737 | } | |
1738 | EXPORT_SYMBOL_GPL(device_store_bool); | |
1739 | ||
1740 | ssize_t device_show_bool(struct device *dev, struct device_attribute *attr, | |
1741 | char *buf) | |
1742 | { | |
1743 | struct dev_ext_attribute *ea = to_ext_attr(attr); | |
1744 | ||
aa838896 | 1745 | return sysfs_emit(buf, "%d\n", *(bool *)(ea->var)); |
91872392 BP |
1746 | } |
1747 | EXPORT_SYMBOL_GPL(device_show_bool); | |
1748 | ||
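/*
 * Illustrative sketch (editor's addition): how a driver typically consumes
 * the dev_ext_attribute helpers above.  DEVICE_ULONG_ATTR() binds a variable
 * to device_show_ulong()/device_store_ulong(); "demo_threshold" is a
 * hypothetical name used only for this example.
 */
static unsigned long demo_threshold;
static DEVICE_ULONG_ATTR(demo_threshold, 0644, demo_threshold);
/*
 * Exposed later, e.g. from probe():
 *	device_create_file(dev, &dev_attr_demo_threshold.attr);
 */
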
1da177e4 | 1749 | /** |
f8878dcb RD |
1750 | * device_release - free device structure. |
1751 | * @kobj: device's kobject. | |
1da177e4 | 1752 | * |
f8878dcb RD |
1753 | * This is called once the reference count for the object |
1754 | * reaches 0. We forward the call to the device's release | |
1755 | * method, which should handle actually freeing the structure. | |
1da177e4 | 1756 | */ |
4a3ad20c | 1757 | static void device_release(struct kobject *kobj) |
1da177e4 | 1758 | { |
b0d1f807 | 1759 | struct device *dev = kobj_to_dev(kobj); |
fb069a5d | 1760 | struct device_private *p = dev->p; |
1da177e4 | 1761 | |
a525a3dd ML |
1762 | /* |
1763 | * Some platform devices are driven without a driver attached,
1764 | * yet managed resources may have been acquired. Make sure
1765 | * all resources are released.
1766 | *
1767 | * Drivers can still add resources to the device after it has
1768 | * been deleted while it is still alive, so release devres here
1769 | * to avoid a possible memory leak.
1770 | */ | |
1771 | devres_release_all(dev); | |
1772 | ||
e0d07278 JQ |
1773 | kfree(dev->dma_range_map); |
1774 | ||
1da177e4 LT |
1775 | if (dev->release) |
1776 | dev->release(dev); | |
f9f852df KS |
1777 | else if (dev->type && dev->type->release) |
1778 | dev->type->release(dev); | |
2620efef GKH |
1779 | else if (dev->class && dev->class->dev_release) |
1780 | dev->class->dev_release(dev); | |
f810a5cf | 1781 | else |
0c1bc6b8 | 1782 | WARN(1, KERN_ERR "Device '%s' does not have a release() function, it is broken and must be fixed. See Documentation/core-api/kobject.rst.\n", |
1e0b2cf9 | 1783 | dev_name(dev)); |
fb069a5d | 1784 | kfree(p); |
1da177e4 LT |
1785 | } |
1786 | ||
bc451f20 EB |
1787 | static const void *device_namespace(struct kobject *kobj) |
1788 | { | |
b0d1f807 | 1789 | struct device *dev = kobj_to_dev(kobj); |
bc451f20 EB |
1790 | const void *ns = NULL; |
1791 | ||
1792 | if (dev->class && dev->class->ns_type) | |
1793 | ns = dev->class->namespace(dev); | |
1794 | ||
1795 | return ns; | |
1796 | } | |
1797 | ||
9944e894 DT |
1798 | static void device_get_ownership(struct kobject *kobj, kuid_t *uid, kgid_t *gid) |
1799 | { | |
1800 | struct device *dev = kobj_to_dev(kobj); | |
1801 | ||
1802 | if (dev->class && dev->class->get_ownership) | |
1803 | dev->class->get_ownership(dev, uid, gid); | |
1804 | } | |
1805 | ||
8f4afc41 | 1806 | static struct kobj_type device_ktype = { |
1da177e4 LT |
1807 | .release = device_release, |
1808 | .sysfs_ops = &dev_sysfs_ops, | |
bc451f20 | 1809 | .namespace = device_namespace, |
9944e894 | 1810 | .get_ownership = device_get_ownership, |
1da177e4 LT |
1811 | }; |
1812 | ||
1813 | ||
312c004d | 1814 | static int dev_uevent_filter(struct kset *kset, struct kobject *kobj) |
1da177e4 LT |
1815 | { |
1816 | struct kobj_type *ktype = get_ktype(kobj); | |
1817 | ||
8f4afc41 | 1818 | if (ktype == &device_ktype) { |
b0d1f807 | 1819 | struct device *dev = kobj_to_dev(kobj); |
1da177e4 LT |
1820 | if (dev->bus) |
1821 | return 1; | |
23681e47 GKH |
1822 | if (dev->class) |
1823 | return 1; | |
1da177e4 LT |
1824 | } |
1825 | return 0; | |
1826 | } | |
1827 | ||
312c004d | 1828 | static const char *dev_uevent_name(struct kset *kset, struct kobject *kobj) |
1da177e4 | 1829 | { |
b0d1f807 | 1830 | struct device *dev = kobj_to_dev(kobj); |
1da177e4 | 1831 | |
23681e47 GKH |
1832 | if (dev->bus) |
1833 | return dev->bus->name; | |
1834 | if (dev->class) | |
1835 | return dev->class->name; | |
1836 | return NULL; | |
1da177e4 LT |
1837 | } |
1838 | ||
7eff2e7a KS |
1839 | static int dev_uevent(struct kset *kset, struct kobject *kobj, |
1840 | struct kobj_uevent_env *env) | |
1da177e4 | 1841 | { |
b0d1f807 | 1842 | struct device *dev = kobj_to_dev(kobj); |
1da177e4 LT |
1843 | int retval = 0; |
1844 | ||
6fcf53ac | 1845 | /* add device node properties if present */ |
23681e47 | 1846 | if (MAJOR(dev->devt)) { |
6fcf53ac KS |
1847 | const char *tmp; |
1848 | const char *name; | |
2c9ede55 | 1849 | umode_t mode = 0; |
4e4098a3 GKH |
1850 | kuid_t uid = GLOBAL_ROOT_UID; |
1851 | kgid_t gid = GLOBAL_ROOT_GID; | |
6fcf53ac | 1852 | |
7eff2e7a KS |
1853 | add_uevent_var(env, "MAJOR=%u", MAJOR(dev->devt)); |
1854 | add_uevent_var(env, "MINOR=%u", MINOR(dev->devt)); | |
3c2670e6 | 1855 | name = device_get_devnode(dev, &mode, &uid, &gid, &tmp); |
6fcf53ac KS |
1856 | if (name) { |
1857 | add_uevent_var(env, "DEVNAME=%s", name); | |
e454cea2 KS |
1858 | if (mode) |
1859 | add_uevent_var(env, "DEVMODE=%#o", mode & 0777); | |
4e4098a3 GKH |
1860 | if (!uid_eq(uid, GLOBAL_ROOT_UID)) |
1861 | add_uevent_var(env, "DEVUID=%u", from_kuid(&init_user_ns, uid)); | |
1862 | if (!gid_eq(gid, GLOBAL_ROOT_GID)) | |
1863 | add_uevent_var(env, "DEVGID=%u", from_kgid(&init_user_ns, gid)); | |
3c2670e6 | 1864 | kfree(tmp); |
6fcf53ac | 1865 | } |
23681e47 GKH |
1866 | } |
1867 | ||
414264f9 | 1868 | if (dev->type && dev->type->name) |
7eff2e7a | 1869 | add_uevent_var(env, "DEVTYPE=%s", dev->type->name); |
414264f9 | 1870 | |
239378f1 | 1871 | if (dev->driver) |
7eff2e7a | 1872 | add_uevent_var(env, "DRIVER=%s", dev->driver->name); |
239378f1 | 1873 | |
07d57a32 GL |
1874 | /* Add common DT information about the device */ |
1875 | of_device_uevent(dev, env); | |
1876 | ||
7eff2e7a | 1877 | /* have the bus specific function add its stuff */ |
312c004d | 1878 | if (dev->bus && dev->bus->uevent) { |
7eff2e7a | 1879 | retval = dev->bus->uevent(dev, env); |
f9f852df | 1880 | if (retval) |
7dc72b28 | 1881 | pr_debug("device: '%s': %s: bus uevent() returned %d\n", |
1e0b2cf9 | 1882 | dev_name(dev), __func__, retval); |
1da177e4 LT |
1883 | } |
1884 | ||
7eff2e7a | 1885 | /* have the class specific function add its stuff */ |
2620efef | 1886 | if (dev->class && dev->class->dev_uevent) { |
7eff2e7a | 1887 | retval = dev->class->dev_uevent(dev, env); |
f9f852df | 1888 | if (retval) |
7dc72b28 | 1889 | pr_debug("device: '%s': %s: class uevent() " |
1e0b2cf9 | 1890 | "returned %d\n", dev_name(dev), |
2b3a302a | 1891 | __func__, retval); |
f9f852df KS |
1892 | } |
1893 | ||
eef35c2d | 1894 | /* have the device type specific function add its stuff */ |
f9f852df | 1895 | if (dev->type && dev->type->uevent) { |
7eff2e7a | 1896 | retval = dev->type->uevent(dev, env); |
f9f852df | 1897 | if (retval) |
7dc72b28 | 1898 | pr_debug("device: '%s': %s: dev_type uevent() " |
1e0b2cf9 | 1899 | "returned %d\n", dev_name(dev), |
2b3a302a | 1900 | __func__, retval); |
2620efef GKH |
1901 | } |
1902 | ||
1da177e4 LT |
1903 | return retval; |
1904 | } | |
1905 | ||
9cd43611 | 1906 | static const struct kset_uevent_ops device_uevent_ops = { |
312c004d KS |
1907 | .filter = dev_uevent_filter, |
1908 | .name = dev_uevent_name, | |
1909 | .uevent = dev_uevent, | |
1da177e4 LT |
1910 | }; |
1911 | ||
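/*
 * Illustrative sketch (editor's addition): dev_uevent() above gives the bus,
 * class and device type a chance to append their own variables.  A bus
 * implementation commonly just emits a MODALIAS string; the "demo" names
 * below are hypothetical.
 */
static int demo_bus_uevent(struct device *dev, struct kobj_uevent_env *env)
{
	return add_uevent_var(env, "MODALIAS=demo:%s", dev_name(dev));
}
/* hooked up via a bus_type such as:
 *	struct bus_type demo_bus = { .name = "demo", .uevent = demo_bus_uevent };
 */
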
c5e064a6 | 1912 | static ssize_t uevent_show(struct device *dev, struct device_attribute *attr, |
16574dcc KS |
1913 | char *buf) |
1914 | { | |
1915 | struct kobject *top_kobj; | |
1916 | struct kset *kset; | |
7eff2e7a | 1917 | struct kobj_uevent_env *env = NULL; |
16574dcc | 1918 | int i; |
948b3edb | 1919 | int len = 0; |
16574dcc KS |
1920 | int retval; |
1921 | ||
1922 | /* search the kset the device belongs to */
1923 | top_kobj = &dev->kobj; | |
5c5daf65 KS |
1924 | while (!top_kobj->kset && top_kobj->parent) |
1925 | top_kobj = top_kobj->parent; | |
16574dcc KS |
1926 | if (!top_kobj->kset) |
1927 | goto out; | |
5c5daf65 | 1928 | |
16574dcc KS |
1929 | kset = top_kobj->kset; |
1930 | if (!kset->uevent_ops || !kset->uevent_ops->uevent) | |
1931 | goto out; | |
1932 | ||
1933 | /* respect filter */ | |
1934 | if (kset->uevent_ops && kset->uevent_ops->filter) | |
1935 | if (!kset->uevent_ops->filter(kset, &dev->kobj)) | |
1936 | goto out; | |
1937 | ||
7eff2e7a KS |
1938 | env = kzalloc(sizeof(struct kobj_uevent_env), GFP_KERNEL); |
1939 | if (!env) | |
c7308c81 GKH |
1940 | return -ENOMEM; |
1941 | ||
16574dcc | 1942 | /* let the kset specific function add its keys */ |
7eff2e7a | 1943 | retval = kset->uevent_ops->uevent(kset, &dev->kobj, env); |
16574dcc KS |
1944 | if (retval) |
1945 | goto out; | |
1946 | ||
1947 | /* copy keys to file */ | |
7eff2e7a | 1948 | for (i = 0; i < env->envp_idx; i++) |
948b3edb | 1949 | len += sysfs_emit_at(buf, len, "%s\n", env->envp[i]); |
16574dcc | 1950 | out: |
7eff2e7a | 1951 | kfree(env); |
948b3edb | 1952 | return len; |
16574dcc KS |
1953 | } |
1954 | ||
c5e064a6 | 1955 | static ssize_t uevent_store(struct device *dev, struct device_attribute *attr, |
a7fd6706 KS |
1956 | const char *buf, size_t count) |
1957 | { | |
df44b479 PR |
1958 | int rc; |
1959 | ||
1960 | rc = kobject_synth_uevent(&dev->kobj, buf, count); | |
1961 | ||
1962 | if (rc) { | |
f36776fa | 1963 | dev_err(dev, "uevent: failed to send synthetic uevent\n"); |
df44b479 PR |
1964 | return rc; |
1965 | } | |
60a96a59 | 1966 | |
a7fd6706 KS |
1967 | return count; |
1968 | } | |
c5e064a6 | 1969 | static DEVICE_ATTR_RW(uevent); |
a7fd6706 | 1970 | |
c5e064a6 | 1971 | static ssize_t online_show(struct device *dev, struct device_attribute *attr, |
4f3549d7 RW |
1972 | char *buf) |
1973 | { | |
1974 | bool val; | |
1975 | ||
5e33bc41 | 1976 | device_lock(dev); |
4f3549d7 | 1977 | val = !dev->offline; |
5e33bc41 | 1978 | device_unlock(dev); |
aa838896 | 1979 | return sysfs_emit(buf, "%u\n", val); |
4f3549d7 RW |
1980 | } |
1981 | ||
c5e064a6 | 1982 | static ssize_t online_store(struct device *dev, struct device_attribute *attr, |
4f3549d7 RW |
1983 | const char *buf, size_t count) |
1984 | { | |
1985 | bool val; | |
1986 | int ret; | |
1987 | ||
1988 | ret = strtobool(buf, &val); | |
1989 | if (ret < 0) | |
1990 | return ret; | |
1991 | ||
5e33bc41 RW |
1992 | ret = lock_device_hotplug_sysfs(); |
1993 | if (ret) | |
1994 | return ret; | |
1995 | ||
4f3549d7 RW |
1996 | ret = val ? device_online(dev) : device_offline(dev); |
1997 | unlock_device_hotplug(); | |
1998 | return ret < 0 ? ret : count; | |
1999 | } | |
c5e064a6 | 2000 | static DEVICE_ATTR_RW(online); |
4f3549d7 | 2001 | |
fa6fdb33 | 2002 | int device_add_groups(struct device *dev, const struct attribute_group **groups) |
621a1672 | 2003 | { |
3e9b2bae | 2004 | return sysfs_create_groups(&dev->kobj, groups); |
de0ff00d | 2005 | } |
a7670d42 | 2006 | EXPORT_SYMBOL_GPL(device_add_groups); |
de0ff00d | 2007 | |
fa6fdb33 GKH |
2008 | void device_remove_groups(struct device *dev, |
2009 | const struct attribute_group **groups) | |
de0ff00d | 2010 | { |
3e9b2bae | 2011 | sysfs_remove_groups(&dev->kobj, groups); |
de0ff00d | 2012 | } |
a7670d42 | 2013 | EXPORT_SYMBOL_GPL(device_remove_groups); |
de0ff00d | 2014 | |
57b8ff07 DT |
2015 | union device_attr_group_devres { |
2016 | const struct attribute_group *group; | |
2017 | const struct attribute_group **groups; | |
2018 | }; | |
2019 | ||
2020 | static int devm_attr_group_match(struct device *dev, void *res, void *data) | |
2021 | { | |
2022 | return ((union device_attr_group_devres *)res)->group == data; | |
2023 | } | |
2024 | ||
2025 | static void devm_attr_group_remove(struct device *dev, void *res) | |
2026 | { | |
2027 | union device_attr_group_devres *devres = res; | |
2028 | const struct attribute_group *group = devres->group; | |
2029 | ||
2030 | dev_dbg(dev, "%s: removing group %p\n", __func__, group); | |
2031 | sysfs_remove_group(&dev->kobj, group); | |
2032 | } | |
2033 | ||
2034 | static void devm_attr_groups_remove(struct device *dev, void *res) | |
2035 | { | |
2036 | union device_attr_group_devres *devres = res; | |
2037 | const struct attribute_group **groups = devres->groups; | |
2038 | ||
2039 | dev_dbg(dev, "%s: removing groups %p\n", __func__, groups); | |
2040 | sysfs_remove_groups(&dev->kobj, groups); | |
2041 | } | |
2042 | ||
2043 | /** | |
2044 | * devm_device_add_group - given a device, create a managed attribute group | |
2045 | * @dev: The device to create the group for | |
2046 | * @grp: The attribute group to create | |
2047 | * | |
2048 | * This function creates a group for the first time. It will explicitly | |
2049 | * warn and error if any of the attribute files being created already exist. | |
2050 | * | |
2051 | * Returns 0 on success or error code on failure. | |
2052 | */ | |
2053 | int devm_device_add_group(struct device *dev, const struct attribute_group *grp) | |
2054 | { | |
2055 | union device_attr_group_devres *devres; | |
2056 | int error; | |
2057 | ||
2058 | devres = devres_alloc(devm_attr_group_remove, | |
2059 | sizeof(*devres), GFP_KERNEL); | |
2060 | if (!devres) | |
2061 | return -ENOMEM; | |
2062 | ||
2063 | error = sysfs_create_group(&dev->kobj, grp); | |
2064 | if (error) { | |
2065 | devres_free(devres); | |
2066 | return error; | |
2067 | } | |
2068 | ||
2069 | devres->group = grp; | |
2070 | devres_add(dev, devres); | |
2071 | return 0; | |
2072 | } | |
2073 | EXPORT_SYMBOL_GPL(devm_device_add_group); | |
2074 | ||
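/*
 * Illustrative sketch (editor's addition): typical use of
 * devm_device_add_group() from a driver's probe routine, so the group is
 * removed automatically on unbind.  The "demo" identifiers are hypothetical.
 */
static ssize_t demo_version_show(struct device *dev,
				 struct device_attribute *attr, char *buf)
{
	return sysfs_emit(buf, "1\n");
}
static DEVICE_ATTR_RO(demo_version);

static struct attribute *demo_attrs[] = {
	&dev_attr_demo_version.attr,
	NULL
};

static const struct attribute_group demo_group = {
	.attrs = demo_attrs,
};
/* in probe():	return devm_device_add_group(dev, &demo_group); */
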
2075 | /** | |
2076 | * devm_device_remove_group - remove a managed group from a device
2077 | * @dev: device to remove the group from | |
2078 | * @grp: group to remove | |
2079 | * | |
2080 | * This function removes a group of attributes from a device. The attributes | |
2081 | * must previously have been created for this group, otherwise it will fail.
2082 | */ | |
2083 | void devm_device_remove_group(struct device *dev, | |
2084 | const struct attribute_group *grp) | |
2085 | { | |
2086 | WARN_ON(devres_release(dev, devm_attr_group_remove, | |
2087 | devm_attr_group_match, | |
2088 | /* cast away const */ (void *)grp)); | |
2089 | } | |
2090 | EXPORT_SYMBOL_GPL(devm_device_remove_group); | |
2091 | ||
2092 | /** | |
2093 | * devm_device_add_groups - create a bunch of managed attribute groups | |
2094 | * @dev: The device to create the group for | |
2095 | * @groups: The attribute groups to create, NULL terminated | |
2096 | * | |
2097 | * This function creates a bunch of managed attribute groups. If an error | |
2098 | * occurs when creating a group, all previously created groups will be | |
2099 | * removed, unwinding everything back to the original state when this | |
2100 | * function was called. It will explicitly warn and error if any of the | |
2101 | * attribute files being created already exist. | |
2102 | * | |
2103 | * Returns 0 on success or error code from sysfs_create_group on failure. | |
2104 | */ | |
2105 | int devm_device_add_groups(struct device *dev, | |
2106 | const struct attribute_group **groups) | |
2107 | { | |
2108 | union device_attr_group_devres *devres; | |
2109 | int error; | |
2110 | ||
2111 | devres = devres_alloc(devm_attr_groups_remove, | |
2112 | sizeof(*devres), GFP_KERNEL); | |
2113 | if (!devres) | |
2114 | return -ENOMEM; | |
2115 | ||
2116 | error = sysfs_create_groups(&dev->kobj, groups); | |
2117 | if (error) { | |
2118 | devres_free(devres); | |
2119 | return error; | |
2120 | } | |
2121 | ||
2122 | devres->groups = groups; | |
2123 | devres_add(dev, devres); | |
2124 | return 0; | |
2125 | } | |
2126 | EXPORT_SYMBOL_GPL(devm_device_add_groups); | |
2127 | ||
2128 | /** | |
2129 | * devm_device_remove_groups - remove a list of managed groups | |
2130 | * | |
2131 | * @dev: The device for the groups to be removed from | |
2132 | * @groups: NULL terminated list of groups to be removed | |
2133 | * | |
2134 | * If groups is not NULL, remove the specified groups from the device. | |
2135 | */ | |
2136 | void devm_device_remove_groups(struct device *dev, | |
2137 | const struct attribute_group **groups) | |
2138 | { | |
2139 | WARN_ON(devres_release(dev, devm_attr_groups_remove, | |
2140 | devm_attr_group_match, | |
2141 | /* cast away const */ (void *)groups)); | |
2142 | } | |
2143 | EXPORT_SYMBOL_GPL(devm_device_remove_groups); | |
de0ff00d | 2144 | |
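/*
 * Illustrative sketch (editor's addition): the plural variant takes a
 * NULL-terminated list of groups, for instance one generated by
 * ATTRIBUTE_GROUPS().  This reuses the hypothetical dev_attr_demo_version
 * from the sketch above.
 */
static struct attribute *demo_extra_attrs[] = {
	&dev_attr_demo_version.attr,
	NULL
};
ATTRIBUTE_GROUPS(demo_extra);
/* in probe():	return devm_device_add_groups(dev, demo_extra_groups); */
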
2620efef GKH |
2145 | static int device_add_attrs(struct device *dev) |
2146 | { | |
2147 | struct class *class = dev->class; | |
aed65af1 | 2148 | const struct device_type *type = dev->type; |
621a1672 | 2149 | int error; |
2620efef | 2150 | |
621a1672 | 2151 | if (class) { |
d05a6f96 | 2152 | error = device_add_groups(dev, class->dev_groups); |
f9f852df | 2153 | if (error) |
621a1672 | 2154 | return error; |
2620efef | 2155 | } |
f9f852df | 2156 | |
621a1672 DT |
2157 | if (type) { |
2158 | error = device_add_groups(dev, type->groups); | |
f9f852df | 2159 | if (error) |
a6b01ded | 2160 | goto err_remove_class_groups; |
f9f852df KS |
2161 | } |
2162 | ||
621a1672 DT |
2163 | error = device_add_groups(dev, dev->groups); |
2164 | if (error) | |
2165 | goto err_remove_type_groups; | |
2166 | ||
4f3549d7 | 2167 | if (device_supports_offline(dev) && !dev->offline_disabled) { |
c5e064a6 | 2168 | error = device_create_file(dev, &dev_attr_online); |
4f3549d7 | 2169 | if (error) |
ecfbf6fd | 2170 | goto err_remove_dev_groups; |
4f3549d7 RW |
2171 | } |
2172 | ||
da6d6475 SK |
2173 | if (fw_devlink_flags && !fw_devlink_is_permissive()) { |
2174 | error = device_create_file(dev, &dev_attr_waiting_for_supplier); | |
2175 | if (error) | |
2176 | goto err_remove_dev_online; | |
2177 | } | |
2178 | ||
621a1672 DT |
2179 | return 0; |
2180 | ||
da6d6475 SK |
2181 | err_remove_dev_online: |
2182 | device_remove_file(dev, &dev_attr_online); | |
ecfbf6fd RW |
2183 | err_remove_dev_groups: |
2184 | device_remove_groups(dev, dev->groups); | |
621a1672 DT |
2185 | err_remove_type_groups: |
2186 | if (type) | |
2187 | device_remove_groups(dev, type->groups); | |
d05a6f96 GKH |
2188 | err_remove_class_groups: |
2189 | if (class) | |
2190 | device_remove_groups(dev, class->dev_groups); | |
621a1672 | 2191 | |
2620efef GKH |
2192 | return error; |
2193 | } | |
2194 | ||
2195 | static void device_remove_attrs(struct device *dev) | |
2196 | { | |
2197 | struct class *class = dev->class; | |
aed65af1 | 2198 | const struct device_type *type = dev->type; |
2620efef | 2199 | |
da6d6475 | 2200 | device_remove_file(dev, &dev_attr_waiting_for_supplier); |
c5e064a6 | 2201 | device_remove_file(dev, &dev_attr_online); |
621a1672 | 2202 | device_remove_groups(dev, dev->groups); |
f9f852df | 2203 | |
621a1672 DT |
2204 | if (type) |
2205 | device_remove_groups(dev, type->groups); | |
2206 | ||
a6b01ded | 2207 | if (class) |
d05a6f96 | 2208 | device_remove_groups(dev, class->dev_groups); |
2620efef GKH |
2209 | } |
2210 | ||
c5e064a6 | 2211 | static ssize_t dev_show(struct device *dev, struct device_attribute *attr, |
23681e47 GKH |
2212 | char *buf) |
2213 | { | |
2214 | return print_dev_t(buf, dev->devt); | |
2215 | } | |
c5e064a6 | 2216 | static DEVICE_ATTR_RO(dev); |
ad6a1e1c | 2217 | |
ca22e56d | 2218 | /* /sys/devices/ */ |
881c6cfd | 2219 | struct kset *devices_kset; |
1da177e4 | 2220 | |
52cdbdd4 GS |
2221 | /** |
2222 | * devices_kset_move_before - Move device in the devices_kset's list. | |
2223 | * @deva: Device to move. | |
2224 | * @devb: Device @deva should come before. | |
2225 | */ | |
2226 | static void devices_kset_move_before(struct device *deva, struct device *devb) | |
2227 | { | |
2228 | if (!devices_kset) | |
2229 | return; | |
2230 | pr_debug("devices_kset: Moving %s before %s\n", | |
2231 | dev_name(deva), dev_name(devb)); | |
2232 | spin_lock(&devices_kset->list_lock); | |
2233 | list_move_tail(&deva->kobj.entry, &devb->kobj.entry); | |
2234 | spin_unlock(&devices_kset->list_lock); | |
2235 | } | |
2236 | ||
2237 | /** | |
2238 | * devices_kset_move_after - Move device in the devices_kset's list. | |
2239 | * @deva: Device to move | |
2240 | * @devb: Device @deva should come after. | |
2241 | */ | |
2242 | static void devices_kset_move_after(struct device *deva, struct device *devb) | |
2243 | { | |
2244 | if (!devices_kset) | |
2245 | return; | |
2246 | pr_debug("devices_kset: Moving %s after %s\n", | |
2247 | dev_name(deva), dev_name(devb)); | |
2248 | spin_lock(&devices_kset->list_lock); | |
2249 | list_move(&deva->kobj.entry, &devb->kobj.entry); | |
2250 | spin_unlock(&devices_kset->list_lock); | |
2251 | } | |
2252 | ||
2253 | /** | |
2254 | * devices_kset_move_last - move the device to the end of devices_kset's list. | |
2255 | * @dev: device to move | |
2256 | */ | |
2257 | void devices_kset_move_last(struct device *dev) | |
2258 | { | |
2259 | if (!devices_kset) | |
2260 | return; | |
2261 | pr_debug("devices_kset: Moving %s to end of list\n", dev_name(dev)); | |
2262 | spin_lock(&devices_kset->list_lock); | |
2263 | list_move_tail(&dev->kobj.entry, &devices_kset->list); | |
2264 | spin_unlock(&devices_kset->list_lock); | |
2265 | } | |
2266 | ||
1da177e4 | 2267 | /** |
4a3ad20c GKH |
2268 | * device_create_file - create sysfs attribute file for device. |
2269 | * @dev: device. | |
2270 | * @attr: device attribute descriptor. | |
1da177e4 | 2271 | */ |
26579ab7 PC |
2272 | int device_create_file(struct device *dev, |
2273 | const struct device_attribute *attr) | |
1da177e4 LT |
2274 | { |
2275 | int error = 0; | |
8f46baaa FB |
2276 | |
2277 | if (dev) { | |
2278 | WARN(((attr->attr.mode & S_IWUGO) && !attr->store), | |
97521978 | 2279 | "Attribute %s: write permission without 'store'\n", |
2280 | attr->attr.name); | |
8f46baaa | 2281 | WARN(((attr->attr.mode & S_IRUGO) && !attr->show), |
97521978 | 2282 | "Attribute %s: read permission without 'show'\n", |
2283 | attr->attr.name); | |
1da177e4 | 2284 | error = sysfs_create_file(&dev->kobj, &attr->attr); |
8f46baaa FB |
2285 | } |
2286 | ||
1da177e4 LT |
2287 | return error; |
2288 | } | |
86df2687 | 2289 | EXPORT_SYMBOL_GPL(device_create_file); |
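/*
 * Illustrative sketch (editor's addition): a minimal read/write attribute
 * registered with device_create_file().  Note the WARN()s above: a writable
 * mode requires a .store method and a readable mode requires .show.  The
 * "demo_enable" attribute and its backing flag are hypothetical.
 */
static bool demo_enable_flag;

static ssize_t demo_enable_show(struct device *dev,
				struct device_attribute *attr, char *buf)
{
	return sysfs_emit(buf, "%d\n", demo_enable_flag);
}

static ssize_t demo_enable_store(struct device *dev,
				 struct device_attribute *attr,
				 const char *buf, size_t count)
{
	if (strtobool(buf, &demo_enable_flag) < 0)
		return -EINVAL;
	return count;
}
static DEVICE_ATTR_RW(demo_enable);
/* in probe():	ret = device_create_file(dev, &dev_attr_demo_enable); */
/* in remove():	device_remove_file(dev, &dev_attr_demo_enable); */
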
1da177e4 LT |
2290 | |
2291 | /** | |
4a3ad20c GKH |
2292 | * device_remove_file - remove sysfs attribute file. |
2293 | * @dev: device. | |
2294 | * @attr: device attribute descriptor. | |
1da177e4 | 2295 | */ |
26579ab7 PC |
2296 | void device_remove_file(struct device *dev, |
2297 | const struct device_attribute *attr) | |
1da177e4 | 2298 | { |
0c98b19f | 2299 | if (dev) |
1da177e4 | 2300 | sysfs_remove_file(&dev->kobj, &attr->attr); |
1da177e4 | 2301 | } |
86df2687 | 2302 | EXPORT_SYMBOL_GPL(device_remove_file); |
1da177e4 | 2303 | |
6b0afc2a TH |
2304 | /** |
2305 | * device_remove_file_self - remove sysfs attribute file from its own method. | |
2306 | * @dev: device. | |
2307 | * @attr: device attribute descriptor. | |
2308 | * | |
2309 | * See kernfs_remove_self() for details. | |
2310 | */ | |
2311 | bool device_remove_file_self(struct device *dev, | |
2312 | const struct device_attribute *attr) | |
2313 | { | |
2314 | if (dev) | |
2315 | return sysfs_remove_file_self(&dev->kobj, &attr->attr); | |
2316 | else | |
2317 | return false; | |
2318 | } | |
2319 | EXPORT_SYMBOL_GPL(device_remove_file_self); | |
2320 | ||
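/*
 * Illustrative sketch (editor's addition): the usual caller of
 * device_remove_file_self() is a "delete"-style attribute that must remove
 * its own sysfs file before tearing the device down, mirroring the
 * kernfs_remove_self() pattern.  "demo_delete" is hypothetical.
 */
static ssize_t demo_delete_store(struct device *dev,
				 struct device_attribute *attr,
				 const char *buf, size_t count)
{
	if (device_remove_file_self(dev, attr))
		device_unregister(dev);	/* only the caller that won the race */
	return count;
}
static DEVICE_ATTR_WO(demo_delete);
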
2589f188 GKH |
2321 | /** |
2322 | * device_create_bin_file - create sysfs binary attribute file for device. | |
2323 | * @dev: device. | |
2324 | * @attr: device binary attribute descriptor. | |
2325 | */ | |
66ecb92b PC |
2326 | int device_create_bin_file(struct device *dev, |
2327 | const struct bin_attribute *attr) | |
2589f188 GKH |
2328 | { |
2329 | int error = -EINVAL; | |
2330 | if (dev) | |
2331 | error = sysfs_create_bin_file(&dev->kobj, attr); | |
2332 | return error; | |
2333 | } | |
2334 | EXPORT_SYMBOL_GPL(device_create_bin_file); | |
2335 | ||
2336 | /** | |
2337 | * device_remove_bin_file - remove sysfs binary attribute file | |
2338 | * @dev: device. | |
2339 | * @attr: device binary attribute descriptor. | |
2340 | */ | |
66ecb92b PC |
2341 | void device_remove_bin_file(struct device *dev, |
2342 | const struct bin_attribute *attr) | |
2589f188 GKH |
2343 | { |
2344 | if (dev) | |
2345 | sysfs_remove_bin_file(&dev->kobj, attr); | |
2346 | } | |
2347 | EXPORT_SYMBOL_GPL(device_remove_bin_file); | |
2348 | ||
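/*
 * Illustrative sketch (editor's addition): exposing a fixed-size binary blob
 * through the helpers above.  The "demo_blob" name, its 64-byte size and the
 * zero-filled payload are hypothetical.
 */
static ssize_t demo_blob_read(struct file *filp, struct kobject *kobj,
			      struct bin_attribute *attr, char *buf,
			      loff_t off, size_t count)
{
	memset(buf, 0, count);	/* a real driver would copy from its buffer */
	return count;
}
static BIN_ATTR_RO(demo_blob, 64);
/* in probe():	device_create_bin_file(dev, &bin_attr_demo_blob); */
/* in remove():	device_remove_bin_file(dev, &bin_attr_demo_blob); */
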
34bb61f9 JB |
2349 | static void klist_children_get(struct klist_node *n) |
2350 | { | |
f791b8c8 GKH |
2351 | struct device_private *p = to_device_private_parent(n); |
2352 | struct device *dev = p->device; | |
34bb61f9 JB |
2353 | |
2354 | get_device(dev); | |
2355 | } | |
2356 | ||
2357 | static void klist_children_put(struct klist_node *n) | |
2358 | { | |
f791b8c8 GKH |
2359 | struct device_private *p = to_device_private_parent(n); |
2360 | struct device *dev = p->device; | |
34bb61f9 JB |
2361 | |
2362 | put_device(dev); | |
2363 | } | |
2364 | ||
1da177e4 | 2365 | /** |
4a3ad20c GKH |
2366 | * device_initialize - init device structure. |
2367 | * @dev: device. | |
1da177e4 | 2368 | * |
5739411a CH |
2369 | * This prepares the device for use by other layers by initializing |
2370 | * its fields. | |
4a3ad20c | 2371 | * It is the first half of device_register(), if called by |
5739411a CH |
2372 | * that function, though it can also be called separately, after
2373 | * which @dev's fields may be used. In particular, get_device()/put_device()
2374 | * may be used for reference counting of @dev after calling this | |
2375 | * function. | |
2376 | * | |
b10d5efd AS |
2377 | * All fields in @dev must be initialized by the caller to 0, except |
2378 | * for those explicitly set to some other value. The simplest | |
2379 | * approach is to use kzalloc() to allocate the structure containing | |
2380 | * @dev. | |
2381 | * | |
5739411a CH |
2382 | * NOTE: Use put_device() to give up your reference instead of freeing |
2383 | * @dev directly once you have called this function. | |
1da177e4 | 2384 | */ |
1da177e4 LT |
2385 | void device_initialize(struct device *dev) |
2386 | { | |
881c6cfd | 2387 | dev->kobj.kset = devices_kset; |
f9cb074b | 2388 | kobject_init(&dev->kobj, &device_ktype); |
1da177e4 | 2389 | INIT_LIST_HEAD(&dev->dma_pools); |
3142788b | 2390 | mutex_init(&dev->mutex); |
87a30e1f DW |
2391 | #ifdef CONFIG_PROVE_LOCKING |
2392 | mutex_init(&dev->lockdep_mutex); | |
2393 | #endif | |
1704f47b | 2394 | lockdep_set_novalidate_class(&dev->mutex); |
9ac7849e TH |
2395 | spin_lock_init(&dev->devres_lock); |
2396 | INIT_LIST_HEAD(&dev->devres_head); | |
3b98aeaf | 2397 | device_pm_init(dev); |
87348136 | 2398 | set_dev_node(dev, -1); |
4a7cc831 JL |
2399 | #ifdef CONFIG_GENERIC_MSI_IRQ |
2400 | INIT_LIST_HEAD(&dev->msi_list); | |
2401 | #endif | |
9ed98953 RW |
2402 | INIT_LIST_HEAD(&dev->links.consumers); |
2403 | INIT_LIST_HEAD(&dev->links.suppliers); | |
e2ae9bcc | 2404 | INIT_LIST_HEAD(&dev->links.needs_suppliers); |
3b052a3e | 2405 | INIT_LIST_HEAD(&dev->links.defer_sync); |
9ed98953 | 2406 | dev->links.status = DL_DEV_NO_DRIVER; |
1da177e4 | 2407 | } |
86df2687 | 2408 | EXPORT_SYMBOL_GPL(device_initialize); |
1da177e4 | 2409 | |
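/*
 * Illustrative sketch (editor's addition): the two-step pattern described
 * above.  Once device_initialize() has run, the embedded kobject is live and
 * any error must be unwound with put_device(), never kfree().  The "demo"
 * structure, names and release handler are hypothetical.
 */
struct demo_device {
	struct device dev;
	int id;
};

static void demo_dev_release(struct device *dev)
{
	kfree(container_of(dev, struct demo_device, dev));
}

static int demo_register(struct demo_device *demo, struct device *parent)
{
	int ret;

	device_initialize(&demo->dev);
	demo->dev.parent = parent;
	demo->dev.release = demo_dev_release;

	ret = dev_set_name(&demo->dev, "demo%d", demo->id);
	if (ret)
		goto err;

	ret = device_add(&demo->dev);
	if (ret)
		goto err;
	return 0;
err:
	put_device(&demo->dev);	/* frees demo via demo_dev_release() */
	return ret;
}
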
d73ce004 | 2410 | struct kobject *virtual_device_parent(struct device *dev) |
f0ee61a6 | 2411 | { |
86406245 | 2412 | static struct kobject *virtual_dir = NULL; |
f0ee61a6 | 2413 | |
86406245 | 2414 | if (!virtual_dir) |
4ff6abff | 2415 | virtual_dir = kobject_create_and_add("virtual", |
881c6cfd | 2416 | &devices_kset->kobj); |
f0ee61a6 | 2417 | |
86406245 | 2418 | return virtual_dir; |
f0ee61a6 GKH |
2419 | } |
2420 | ||
bc451f20 EB |
2421 | struct class_dir { |
2422 | struct kobject kobj; | |
2423 | struct class *class; | |
2424 | }; | |
2425 | ||
2426 | #define to_class_dir(obj) container_of(obj, struct class_dir, kobj) | |
2427 | ||
2428 | static void class_dir_release(struct kobject *kobj) | |
2429 | { | |
2430 | struct class_dir *dir = to_class_dir(kobj); | |
2431 | kfree(dir); | |
2432 | } | |
2433 | ||
2434 | static const | |
2435 | struct kobj_ns_type_operations *class_dir_child_ns_type(struct kobject *kobj) | |
40fa5422 | 2436 | { |
bc451f20 EB |
2437 | struct class_dir *dir = to_class_dir(kobj); |
2438 | return dir->class->ns_type; | |
2439 | } | |
2440 | ||
2441 | static struct kobj_type class_dir_ktype = { | |
2442 | .release = class_dir_release, | |
2443 | .sysfs_ops = &kobj_sysfs_ops, | |
2444 | .child_ns_type = class_dir_child_ns_type | |
2445 | }; | |
2446 | ||
2447 | static struct kobject * | |
2448 | class_dir_create_and_add(struct class *class, struct kobject *parent_kobj) | |
2449 | { | |
2450 | struct class_dir *dir; | |
43968d2f GKH |
2451 | int retval; |
2452 | ||
bc451f20 EB |
2453 | dir = kzalloc(sizeof(*dir), GFP_KERNEL); |
2454 | if (!dir) | |
84d0c27d | 2455 | return ERR_PTR(-ENOMEM); |
bc451f20 EB |
2456 | |
2457 | dir->class = class; | |
2458 | kobject_init(&dir->kobj, &class_dir_ktype); | |
2459 | ||
6b6e39a6 | 2460 | dir->kobj.kset = &class->p->glue_dirs; |
bc451f20 EB |
2461 | |
2462 | retval = kobject_add(&dir->kobj, parent_kobj, "%s", class->name); | |
2463 | if (retval < 0) { | |
2464 | kobject_put(&dir->kobj); | |
84d0c27d | 2465 | return ERR_PTR(retval); |
bc451f20 EB |
2466 | } |
2467 | return &dir->kobj; | |
2468 | } | |
2469 | ||
e4a60d13 | 2470 | static DEFINE_MUTEX(gdp_mutex); |
bc451f20 EB |
2471 | |
2472 | static struct kobject *get_device_parent(struct device *dev, | |
2473 | struct device *parent) | |
2474 | { | |
86406245 KS |
2475 | if (dev->class) { |
2476 | struct kobject *kobj = NULL; | |
2477 | struct kobject *parent_kobj; | |
2478 | struct kobject *k; | |
2479 | ||
ead454fe | 2480 | #ifdef CONFIG_BLOCK |
39aba963 | 2481 | /* block disks show up in /sys/block */ |
e52eec13 | 2482 | if (sysfs_deprecated && dev->class == &block_class) { |
39aba963 KS |
2483 | if (parent && parent->class == &block_class) |
2484 | return &parent->kobj; | |
6b6e39a6 | 2485 | return &block_class.p->subsys.kobj; |
39aba963 | 2486 | } |
ead454fe | 2487 | #endif |
e52eec13 | 2488 | |
86406245 KS |
2489 | /* |
2490 | * If we have no parent, we live in "virtual". | |
0f4dafc0 KS |
2491 | * Class devices with a non-class device as parent live
2492 | * in a "glue" directory to prevent namespace collisions.
86406245 KS |
2493 | */ |
2494 | if (parent == NULL) | |
2495 | parent_kobj = virtual_device_parent(dev); | |
24b1442d | 2496 | else if (parent->class && !dev->class->ns_type) |
86406245 KS |
2497 | return &parent->kobj; |
2498 | else | |
2499 | parent_kobj = &parent->kobj; | |
2500 | ||
77d3d7c1 TH |
2501 | mutex_lock(&gdp_mutex); |
2502 | ||
86406245 | 2503 | /* find our class-directory at the parent and reference it */ |
6b6e39a6 KS |
2504 | spin_lock(&dev->class->p->glue_dirs.list_lock); |
2505 | list_for_each_entry(k, &dev->class->p->glue_dirs.list, entry) | |
86406245 KS |
2506 | if (k->parent == parent_kobj) { |
2507 | kobj = kobject_get(k); | |
2508 | break; | |
2509 | } | |
6b6e39a6 | 2510 | spin_unlock(&dev->class->p->glue_dirs.list_lock); |
77d3d7c1 TH |
2511 | if (kobj) { |
2512 | mutex_unlock(&gdp_mutex); | |
86406245 | 2513 | return kobj; |
77d3d7c1 | 2514 | } |
86406245 KS |
2515 | |
2516 | /* or create a new class-directory at the parent device */ | |
bc451f20 | 2517 | k = class_dir_create_and_add(dev->class, parent_kobj); |
0f4dafc0 | 2518 | /* do not emit an uevent for this simple "glue" directory */ |
77d3d7c1 | 2519 | mutex_unlock(&gdp_mutex); |
43968d2f | 2520 | return k; |
86406245 KS |
2521 | } |
2522 | ||
ca22e56d KS |
2523 | /* subsystems can specify a default root directory for their devices */ |
2524 | if (!parent && dev->bus && dev->bus->dev_root) | |
2525 | return &dev->bus->dev_root->kobj; | |
2526 | ||
86406245 | 2527 | if (parent) |
c744aeae CH |
2528 | return &parent->kobj; |
2529 | return NULL; | |
2530 | } | |
da231fd5 | 2531 | |
cebf8fd1 ML |
2532 | static inline bool live_in_glue_dir(struct kobject *kobj, |
2533 | struct device *dev) | |
2534 | { | |
2535 | if (!kobj || !dev->class || | |
2536 | kobj->kset != &dev->class->p->glue_dirs) | |
2537 | return false; | |
2538 | return true; | |
2539 | } | |
2540 | ||
2541 | static inline struct kobject *get_glue_dir(struct device *dev) | |
2542 | { | |
2543 | return dev->kobj.parent; | |
2544 | } | |
2545 | ||
2546 | /* | |
2547 | * Make sure cleaning up the glue dir is the last step; we need to
2548 | * make sure the kobject's .release handler runs while holding the
2549 | * global lock.
2550 | */ | |
63b6971a | 2551 | static void cleanup_glue_dir(struct device *dev, struct kobject *glue_dir) |
da231fd5 | 2552 | { |
ac43432c MS |
2553 | unsigned int ref; |
2554 | ||
0f4dafc0 | 2555 | /* see if we live in a "glue" directory */ |
cebf8fd1 | 2556 | if (!live_in_glue_dir(glue_dir, dev)) |
da231fd5 KS |
2557 | return; |
2558 | ||
e4a60d13 | 2559 | mutex_lock(&gdp_mutex); |
ac43432c MS |
2560 | /*
2561 | * There is a race condition between removing glue directory | |
2562 | * and adding a new device under the glue directory. | |
2563 | * | |
2564 | * CPU1: CPU2: | |
2565 | * | |
2566 | * device_add() | |
2567 | * get_device_parent() | |
2568 | * class_dir_create_and_add() | |
2569 | * kobject_add_internal() | |
2570 | * create_dir() // create glue_dir | |
2571 | * | |
2572 | * device_add() | |
2573 | * get_device_parent() | |
2574 | * kobject_get() // get glue_dir | |
2575 | * | |
2576 | * device_del() | |
2577 | * cleanup_glue_dir() | |
2578 | * kobject_del(glue_dir) | |
2579 | * | |
2580 | * kobject_add() | |
2581 | * kobject_add_internal() | |
2582 | * create_dir() // in glue_dir | |
2583 | * sysfs_create_dir_ns() | |
2584 | * kernfs_create_dir_ns(sd) | |
2585 | * | |
2586 | * sysfs_remove_dir() // glue_dir->sd=NULL | |
2587 | * sysfs_put() // free glue_dir->sd | |
2588 | * | |
2589 | * // sd is freed | |
2590 | * kernfs_new_node(sd) | |
2591 | * kernfs_get(glue_dir) | |
2592 | * kernfs_add_one() | |
2593 | * kernfs_put() | |
2594 | * | |
2595 | * If CPU2 adds a new device under the glue dir before CPU1 removes
2596 | * its last child device, the glue_dir kobject reference count is
2597 | * increased to 2 in kobject_get(k), and CPU2 has already called
2598 | * kernfs_create_dir_ns(). Meanwhile, CPU1 calls sysfs_remove_dir()
2599 | * and sysfs_put(), which results in glue_dir->sd being freed.
2600 | *
2601 | * CPU2 will then see a stale "empty" but still potentially used
2602 | * glue dir around in kernfs_new_node().
2603 | *
2604 | * To avoid this, we should also make sure that the kernfs_node for
2605 | * glue_dir is released on CPU1 only when the refcount of the
2606 | * glue_dir kobject is 1.
2607 | */ | |
2608 | ref = kref_read(&glue_dir->kref); | |
2609 | if (!kobject_has_children(glue_dir) && !--ref) | |
726e4109 | 2610 | kobject_del(glue_dir); |
0f4dafc0 | 2611 | kobject_put(glue_dir); |
e4a60d13 | 2612 | mutex_unlock(&gdp_mutex); |
da231fd5 | 2613 | } |
63b6971a | 2614 | |
2ee97caf CH |
2615 | static int device_add_class_symlinks(struct device *dev) |
2616 | { | |
5590f319 | 2617 | struct device_node *of_node = dev_of_node(dev); |
2ee97caf CH |
2618 | int error; |
2619 | ||
5590f319 | 2620 | if (of_node) { |
0c3c234b | 2621 | error = sysfs_create_link(&dev->kobj, of_node_kobj(of_node), "of_node"); |
5590f319 BH |
2622 | if (error) |
2623 | dev_warn(dev, "Error %d creating of_node link\n",error); | |
2624 | /* An error here doesn't warrant bringing down the device */ | |
2625 | } | |
2626 | ||
2ee97caf CH |
2627 | if (!dev->class) |
2628 | return 0; | |
da231fd5 | 2629 | |
1fbfee6c | 2630 | error = sysfs_create_link(&dev->kobj, |
6b6e39a6 | 2631 | &dev->class->p->subsys.kobj, |
2ee97caf CH |
2632 | "subsystem"); |
2633 | if (error) | |
5590f319 | 2634 | goto out_devnode; |
da231fd5 | 2635 | |
4e886c29 | 2636 | if (dev->parent && device_is_not_partition(dev)) { |
39aba963 | 2637 | error = sysfs_create_link(&dev->kobj, &dev->parent->kobj, |
4f01a757 DT |
2638 | "device"); |
2639 | if (error) | |
39aba963 | 2640 | goto out_subsys; |
2ee97caf | 2641 | } |
2ee97caf | 2642 | |
ead454fe | 2643 | #ifdef CONFIG_BLOCK |
39aba963 | 2644 | /* /sys/block has directories and does not need symlinks */ |
e52eec13 | 2645 | if (sysfs_deprecated && dev->class == &block_class) |
39aba963 | 2646 | return 0; |
ead454fe | 2647 | #endif |
39aba963 | 2648 | |
da231fd5 | 2649 | /* link in the class directory pointing to the device */ |
6b6e39a6 | 2650 | error = sysfs_create_link(&dev->class->p->subsys.kobj, |
1e0b2cf9 | 2651 | &dev->kobj, dev_name(dev)); |
da231fd5 | 2652 | if (error) |
39aba963 | 2653 | goto out_device; |
da231fd5 | 2654 | |
da231fd5 KS |
2655 | return 0; |
2656 | ||
39aba963 KS |
2657 | out_device: |
2658 | sysfs_remove_link(&dev->kobj, "device"); | |
da231fd5 | 2659 | |
2ee97caf CH |
2660 | out_subsys: |
2661 | sysfs_remove_link(&dev->kobj, "subsystem"); | |
5590f319 BH |
2662 | out_devnode: |
2663 | sysfs_remove_link(&dev->kobj, "of_node"); | |
2ee97caf CH |
2664 | return error; |
2665 | } | |
2666 | ||
2667 | static void device_remove_class_symlinks(struct device *dev) | |
2668 | { | |
5590f319 BH |
2669 | if (dev_of_node(dev)) |
2670 | sysfs_remove_link(&dev->kobj, "of_node"); | |
2671 | ||
2ee97caf CH |
2672 | if (!dev->class) |
2673 | return; | |
da231fd5 | 2674 | |
4e886c29 | 2675 | if (dev->parent && device_is_not_partition(dev)) |
da231fd5 | 2676 | sysfs_remove_link(&dev->kobj, "device"); |
2ee97caf | 2677 | sysfs_remove_link(&dev->kobj, "subsystem"); |
ead454fe | 2678 | #ifdef CONFIG_BLOCK |
e52eec13 | 2679 | if (sysfs_deprecated && dev->class == &block_class) |
39aba963 | 2680 | return; |
ead454fe | 2681 | #endif |
6b6e39a6 | 2682 | sysfs_delete_link(&dev->class->p->subsys.kobj, &dev->kobj, dev_name(dev)); |
2ee97caf CH |
2683 | } |
2684 | ||
413c239f SR |
2685 | /** |
2686 | * dev_set_name - set a device name | |
2687 | * @dev: device | |
46232366 | 2688 | * @fmt: format string for the device's name |
413c239f SR |
2689 | */ |
2690 | int dev_set_name(struct device *dev, const char *fmt, ...) | |
2691 | { | |
2692 | va_list vargs; | |
1fa5ae85 | 2693 | int err; |
413c239f SR |
2694 | |
2695 | va_start(vargs, fmt); | |
1fa5ae85 | 2696 | err = kobject_set_name_vargs(&dev->kobj, fmt, vargs); |
413c239f | 2697 | va_end(vargs); |
1fa5ae85 | 2698 | return err; |
413c239f SR |
2699 | } |
2700 | EXPORT_SYMBOL_GPL(dev_set_name); | |
2701 | ||
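/*
 * Illustrative sketch (editor's addition): dev_set_name() accepts a printf-
 * style format; device_add() itself uses it for dev->init_name and for the
 * bus-provided "<dev_name><id>" enumeration below.  The "phy%d.%d" scheme
 * here is hypothetical.
 */
static int example_name_device(struct device *dev, int port, int lane)
{
	return dev_set_name(dev, "phy%d.%d", port, lane);	/* e.g. "phy0.1" */
}
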
e105b8bf DW |
2702 | /** |
2703 | * device_to_dev_kobj - select a /sys/dev/ directory for the device | |
2704 | * @dev: device | |
2705 | * | |
2706 | * By default we select char/ for new entries. Setting class->dev_kobj
2707 | * to NULL prevents an entry from being created. class->dev_kobj must | |
2708 | * be set (or cleared) before any devices are registered to the class | |
2709 | * otherwise device_create_sys_dev_entry() and | |
0d4e293c PK |
2710 | * device_remove_sys_dev_entry() will disagree about the presence of |
2711 | * the link. | |
e105b8bf DW |
2712 | */ |
2713 | static struct kobject *device_to_dev_kobj(struct device *dev) | |
2714 | { | |
2715 | struct kobject *kobj; | |
2716 | ||
2717 | if (dev->class) | |
2718 | kobj = dev->class->dev_kobj; | |
2719 | else | |
2720 | kobj = sysfs_dev_char_kobj; | |
2721 | ||
2722 | return kobj; | |
2723 | } | |
2724 | ||
2725 | static int device_create_sys_dev_entry(struct device *dev) | |
2726 | { | |
2727 | struct kobject *kobj = device_to_dev_kobj(dev); | |
2728 | int error = 0; | |
2729 | char devt_str[15]; | |
2730 | ||
2731 | if (kobj) { | |
2732 | format_dev_t(devt_str, dev->devt); | |
2733 | error = sysfs_create_link(kobj, &dev->kobj, devt_str); | |
2734 | } | |
2735 | ||
2736 | return error; | |
2737 | } | |
2738 | ||
2739 | static void device_remove_sys_dev_entry(struct device *dev) | |
2740 | { | |
2741 | struct kobject *kobj = device_to_dev_kobj(dev); | |
2742 | char devt_str[15]; | |
2743 | ||
2744 | if (kobj) { | |
2745 | format_dev_t(devt_str, dev->devt); | |
2746 | sysfs_remove_link(kobj, devt_str); | |
2747 | } | |
2748 | } | |
2749 | ||
46d3a037 | 2750 | static int device_private_init(struct device *dev) |
b4028437 GKH |
2751 | { |
2752 | dev->p = kzalloc(sizeof(*dev->p), GFP_KERNEL); | |
2753 | if (!dev->p) | |
2754 | return -ENOMEM; | |
2755 | dev->p->device = dev; | |
2756 | klist_init(&dev->p->klist_children, klist_children_get, | |
2757 | klist_children_put); | |
ef8a3fd6 | 2758 | INIT_LIST_HEAD(&dev->p->deferred_probe); |
b4028437 GKH |
2759 | return 0; |
2760 | } | |
2761 | ||
1da177e4 | 2762 | /** |
4a3ad20c GKH |
2763 | * device_add - add device to device hierarchy. |
2764 | * @dev: device. | |
1da177e4 | 2765 | * |
4a3ad20c GKH |
2766 | * This is part 2 of device_register(), though may be called |
2767 | * separately _iff_ device_initialize() has been called separately. | |
1da177e4 | 2768 | * |
5739411a | 2769 | * This adds @dev to the kobject hierarchy via kobject_add(), adds it |
4a3ad20c GKH |
2770 | * to the global and sibling lists for the device, then |
2771 | * adds it to the other relevant subsystems of the driver model. | |
5739411a | 2772 | * |
b10d5efd AS |
2773 | * Do not call this routine or device_register() more than once for |
2774 | * any device structure. The driver model core is not designed to work | |
2775 | * with devices that get unregistered and then spring back to life. | |
2776 | * (Among other things, it's very hard to guarantee that all references | |
2777 | * to the previous incarnation of @dev have been dropped.) Allocate | |
2778 | * and register a fresh new struct device instead. | |
2779 | * | |
5739411a CH |
2780 | * NOTE: _Never_ directly free @dev after calling this function, even |
2781 | * if it returned an error! Always use put_device() to give up your | |
2782 | * reference instead. | |
affada72 BP |
2783 | * |
2784 | * Rule of thumb is: if device_add() succeeds, you should call | |
2785 | * device_del() when you want to get rid of it. If device_add() has | |
2786 | * *not* succeeded, use *only* put_device() to drop the reference | |
2787 | * count. | |
1da177e4 LT |
2788 | */ |
2789 | int device_add(struct device *dev) | |
2790 | { | |
35dbf4ef | 2791 | struct device *parent; |
ca22e56d | 2792 | struct kobject *kobj; |
c47ed219 | 2793 | struct class_interface *class_intf; |
5f5377ea | 2794 | int error = -EINVAL; |
cebf8fd1 | 2795 | struct kobject *glue_dir = NULL; |
775b64d2 | 2796 | |
1da177e4 | 2797 | dev = get_device(dev); |
c906a48a GKH |
2798 | if (!dev) |
2799 | goto done; | |
2800 | ||
fb069a5d | 2801 | if (!dev->p) { |
b4028437 GKH |
2802 | error = device_private_init(dev); |
2803 | if (error) | |
2804 | goto done; | |
fb069a5d | 2805 | } |
fb069a5d | 2806 | |
1fa5ae85 KS |
2807 | /* |
2808 | * For statically allocated devices, which should all be converted
2809 | * some day, we need to initialize the name here. We prevent reading
2810 | * the name back and force the use of dev_name().
2811 | */ | |
2812 | if (dev->init_name) { | |
acc0e90f | 2813 | dev_set_name(dev, "%s", dev->init_name); |
1fa5ae85 KS |
2814 | dev->init_name = NULL; |
2815 | } | |
c906a48a | 2816 | |
ca22e56d KS |
2817 | /* subsystems can specify simple device enumeration */ |
2818 | if (!dev_name(dev) && dev->bus && dev->bus->dev_name) | |
2819 | dev_set_name(dev, "%s%u", dev->bus->dev_name, dev->id); | |
2820 | ||
e6309e75 TG |
2821 | if (!dev_name(dev)) { |
2822 | error = -EINVAL; | |
5c8563d7 | 2823 | goto name_error; |
e6309e75 | 2824 | } |
1da177e4 | 2825 | |
1e0b2cf9 | 2826 | pr_debug("device: '%s': %s\n", dev_name(dev), __func__); |
c205ef48 | 2827 | |
1da177e4 | 2828 | parent = get_device(dev->parent); |
ca22e56d | 2829 | kobj = get_device_parent(dev, parent); |
84d0c27d TH |
2830 | if (IS_ERR(kobj)) { |
2831 | error = PTR_ERR(kobj); | |
2832 | goto parent_error; | |
2833 | } | |
ca22e56d KS |
2834 | if (kobj) |
2835 | dev->kobj.parent = kobj; | |
1da177e4 | 2836 | |
0d358f22 | 2837 | /* use parent numa_node */ |
56f2de81 | 2838 | if (parent && (dev_to_node(dev) == NUMA_NO_NODE)) |
0d358f22 YL |
2839 | set_dev_node(dev, dev_to_node(parent)); |
2840 | ||
1da177e4 | 2841 | /* first, register with generic layer. */ |
8a577ffc KS |
2842 | /* we require the name to be set before, and pass NULL */ |
2843 | error = kobject_add(&dev->kobj, dev->kobj.parent, NULL); | |
cebf8fd1 ML |
2844 | if (error) { |
2845 | glue_dir = get_glue_dir(dev); | |
1da177e4 | 2846 | goto Error; |
cebf8fd1 | 2847 | } |
a7fd6706 | 2848 | |
37022644 | 2849 | /* notify platform of device entry */ |
07de0e86 HK |
2850 | error = device_platform_notify(dev, KOBJ_ADD); |
2851 | if (error) | |
2852 | goto platform_error; | |
37022644 | 2853 | |
c5e064a6 | 2854 | error = device_create_file(dev, &dev_attr_uevent); |
a306eea4 CH |
2855 | if (error) |
2856 | goto attrError; | |
a7fd6706 | 2857 | |
2ee97caf CH |
2858 | error = device_add_class_symlinks(dev); |
2859 | if (error) | |
2860 | goto SymlinkError; | |
dc0afa83 CH |
2861 | error = device_add_attrs(dev); |
2862 | if (error) | |
2620efef | 2863 | goto AttrsError; |
dc0afa83 CH |
2864 | error = bus_add_device(dev); |
2865 | if (error) | |
1da177e4 | 2866 | goto BusError; |
3b98aeaf | 2867 | error = dpm_sysfs_add(dev); |
57eee3d2 | 2868 | if (error) |
3b98aeaf AS |
2869 | goto DPMError; |
2870 | device_pm_add(dev); | |
ec0676ee | 2871 | |
0cd75047 SK |
2872 | if (MAJOR(dev->devt)) { |
2873 | error = device_create_file(dev, &dev_attr_dev); | |
2874 | if (error) | |
2875 | goto DevAttrError; | |
2876 | ||
2877 | error = device_create_sys_dev_entry(dev); | |
2878 | if (error) | |
2879 | goto SysEntryError; | |
2880 | ||
2881 | devtmpfs_create_node(dev); | |
2882 | } | |
2883 | ||
ec0676ee | 2884 | /* Notify clients of device addition. This call must come |
268863f4 | 2885 | * after dpm_sysfs_add() and before kobject_uevent(). |
ec0676ee AS |
2886 | */ |
2887 | if (dev->bus) | |
2888 | blocking_notifier_call_chain(&dev->bus->p->bus_notifier, | |
2889 | BUS_NOTIFY_ADD_DEVICE, dev); | |
2890 | ||
83b5fb4c | 2891 | kobject_uevent(&dev->kobj, KOBJ_ADD); |
372a67c0 | 2892 | |
e2ae9bcc SK |
2893 | /* |
2894 | * Check if any of the other devices (consumers) have been waiting for | |
2895 | * this device (supplier) to be added so that they can create a device | |
2896 | * link to it. | |
2897 | * | |
2898 | * This needs to happen after device_pm_add() because device_link_add() | |
2899 | * requires the supplier be registered before it's called. | |
2900 | * | |
2cd38fd1 | 2901 | * But this also needs to happen before bus_probe_device() to make sure |
e2ae9bcc SK |
2902 | * waiting consumers can link to it before the driver is bound to the |
2903 | * device and the driver sync_state callback is called for this device. | |
2904 | */ | |
2cd38fd1 SK |
2905 | if (dev->fwnode && !dev->fwnode->dev) { |
2906 | dev->fwnode->dev = dev; | |
5f5377ea | 2907 | fw_devlink_link_device(dev); |
03324507 | 2908 | } |
e2ae9bcc | 2909 | |
2023c610 | 2910 | bus_probe_device(dev); |
1da177e4 | 2911 | if (parent) |
f791b8c8 GKH |
2912 | klist_add_tail(&dev->p->knode_parent, |
2913 | &parent->p->klist_children); | |
1da177e4 | 2914 | |
5d9fd169 | 2915 | if (dev->class) { |
ca22e56d | 2916 | mutex_lock(&dev->class->p->mutex); |
c47ed219 | 2917 | /* tie the class to the device */ |
570d0200 | 2918 | klist_add_tail(&dev->p->knode_class, |
6b6e39a6 | 2919 | &dev->class->p->klist_devices); |
c47ed219 GKH |
2920 | |
2921 | /* notify any interfaces that the device is here */ | |
184f1f77 | 2922 | list_for_each_entry(class_intf, |
ca22e56d | 2923 | &dev->class->p->interfaces, node) |
c47ed219 GKH |
2924 | if (class_intf->add_dev) |
2925 | class_intf->add_dev(dev, class_intf); | |
ca22e56d | 2926 | mutex_unlock(&dev->class->p->mutex); |
5d9fd169 | 2927 | } |
c906a48a | 2928 | done: |
1da177e4 LT |
2929 | put_device(dev); |
2930 | return error; | |
0cd75047 SK |
2931 | SysEntryError: |
2932 | if (MAJOR(dev->devt)) | |
2933 | device_remove_file(dev, &dev_attr_dev); | |
2934 | DevAttrError: | |
2935 | device_pm_remove(dev); | |
2936 | dpm_sysfs_remove(dev); | |
3b98aeaf | 2937 | DPMError: |
57eee3d2 RW |
2938 | bus_remove_device(dev); |
2939 | BusError: | |
82f0cf9b | 2940 | device_remove_attrs(dev); |
2620efef | 2941 | AttrsError: |
2ee97caf CH |
2942 | device_remove_class_symlinks(dev); |
2943 | SymlinkError: | |
c5e064a6 | 2944 | device_remove_file(dev, &dev_attr_uevent); |
23681e47 | 2945 | attrError: |
07de0e86 HK |
2946 | device_platform_notify(dev, KOBJ_REMOVE); |
2947 | platform_error: | |
312c004d | 2948 | kobject_uevent(&dev->kobj, KOBJ_REMOVE); |
cebf8fd1 | 2949 | glue_dir = get_glue_dir(dev); |
1da177e4 LT |
2950 | kobject_del(&dev->kobj); |
2951 | Error: | |
cebf8fd1 | 2952 | cleanup_glue_dir(dev, glue_dir); |
84d0c27d | 2953 | parent_error: |
5f0163a5 | 2954 | put_device(parent); |
5c8563d7 KS |
2955 | name_error: |
2956 | kfree(dev->p); | |
2957 | dev->p = NULL; | |
c906a48a | 2958 | goto done; |
1da177e4 | 2959 | } |
86df2687 | 2960 | EXPORT_SYMBOL_GPL(device_add); |
1da177e4 | 2961 | |
1da177e4 | 2962 | /** |
4a3ad20c GKH |
2963 | * device_register - register a device with the system. |
2964 | * @dev: pointer to the device structure | |
1da177e4 | 2965 | * |
4a3ad20c GKH |
2966 | * This happens in two clean steps - initialize the device |
2967 | * and add it to the system. The two steps can be called | |
2968 | * separately, but this is the easiest and most common. | |
2969 | * I.e. you should only call the two helpers separately if | |
2970 | * you have a clearly defined need to use and refcount the device |
2971 | * before it is added to the hierarchy. | |
5739411a | 2972 | * |
b10d5efd AS |
2973 | * For more information, see the kerneldoc for device_initialize() |
2974 | * and device_add(). | |
2975 | * | |
5739411a CH |
2976 | * NOTE: _Never_ directly free @dev after calling this function, even |
2977 | * if it returned an error! Always use put_device() to give up the | |
2978 | * reference initialized in this function instead. | |
1da177e4 | 2979 | */ |
1da177e4 LT |
2980 | int device_register(struct device *dev) |
2981 | { | |
2982 | device_initialize(dev); | |
2983 | return device_add(dev); | |
2984 | } | |
86df2687 | 2985 | EXPORT_SYMBOL_GPL(device_register); |
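A minimal caller sketch (not part of this file) of the NOTE above: once device_register() has been called, the structure is only ever released through put_device() and the release() callback, never kfree()'d directly. The foo_* names are hypothetical.

	#include <linux/device.h>
	#include <linux/err.h>
	#include <linux/slab.h>

	struct foo_device {
		struct device dev;
	};

	static void foo_release(struct device *dev)
	{
		/* runs once the last reference to the device is dropped */
		kfree(container_of(dev, struct foo_device, dev));
	}

	static struct foo_device *foo_create(struct device *parent)
	{
		struct foo_device *foo;
		int err;

		foo = kzalloc(sizeof(*foo), GFP_KERNEL);
		if (!foo)
			return ERR_PTR(-ENOMEM);

		foo->dev.parent = parent;
		foo->dev.release = foo_release;

		err = dev_set_name(&foo->dev, "foo0");
		if (err) {
			kfree(foo);	/* still safe: device_register() has not run yet */
			return ERR_PTR(err);
		}

		err = device_register(&foo->dev);
		if (err) {
			/* never kfree() here; the put ends up in foo_release() */
			put_device(&foo->dev);
			return ERR_PTR(err);
		}
		return foo;
	}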
1da177e4 | 2986 | |
1da177e4 | 2987 | /** |
4a3ad20c GKH |
2988 | * get_device - increment reference count for device. |
2989 | * @dev: device. | |
1da177e4 | 2990 | * |
4a3ad20c GKH |
2991 | * This simply forwards the call to kobject_get(), though |
2992 | * we do take care to provide for the case that we get a NULL | |
2993 | * pointer passed in. | |
1da177e4 | 2994 | */ |
4a3ad20c | 2995 | struct device *get_device(struct device *dev) |
1da177e4 | 2996 | { |
b0d1f807 | 2997 | return dev ? kobj_to_dev(kobject_get(&dev->kobj)) : NULL; |
1da177e4 | 2998 | } |
86df2687 | 2999 | EXPORT_SYMBOL_GPL(get_device); |
1da177e4 | 3000 | |
1da177e4 | 3001 | /** |
4a3ad20c GKH |
3002 | * put_device - decrement reference count. |
3003 | * @dev: device in question. | |
1da177e4 | 3004 | */ |
4a3ad20c | 3005 | void put_device(struct device *dev) |
1da177e4 | 3006 | { |
edfaa7c3 | 3007 | /* might_sleep(); */ |
1da177e4 LT |
3008 | if (dev) |
3009 | kobject_put(&dev->kobj); | |
3010 | } | |
86df2687 | 3011 | EXPORT_SYMBOL_GPL(put_device); |
1da177e4 | 3012 | |
00289cd8 DW |
3013 | bool kill_device(struct device *dev) |
3014 | { | |
3015 | /* | |
3016 | * Require the device lock and set the "dead" flag to guarantee that | |
3017 | * the update behavior is consistent with the other bitfields near | |
3018 | * it and that we cannot have an asynchronous probe routine trying | |
3019 | * to run while we are tearing out the bus/class/sysfs from | |
3020 | * underneath the device. | |
3021 | */ | |
3022 | lockdep_assert_held(&dev->mutex); | |
3023 | ||
3024 | if (dev->p->dead) | |
3025 | return false; | |
3026 | dev->p->dead = true; | |
3027 | return true; | |
3028 | } | |
3029 | EXPORT_SYMBOL_GPL(kill_device); | |
3030 | ||
1da177e4 | 3031 | /** |
4a3ad20c GKH |
3032 | * device_del - delete device from system. |
3033 | * @dev: device. | |
1da177e4 | 3034 | * |
4a3ad20c GKH |
3035 | * This is the first part of the device unregistration |
3036 | * sequence. This removes the device from the lists we control | |
3037 | * from here, has it removed from the other driver model | |
3038 | * subsystems it was added to in device_add(), and removes it | |
3039 | * from the kobject hierarchy. | |
1da177e4 | 3040 | * |
4a3ad20c GKH |
3041 | * NOTE: this should be called manually _iff_ device_add() was |
3042 | * also called manually. | |
1da177e4 | 3043 | */ |
4a3ad20c | 3044 | void device_del(struct device *dev) |
1da177e4 | 3045 | { |
4a3ad20c | 3046 | struct device *parent = dev->parent; |
cebf8fd1 | 3047 | struct kobject *glue_dir = NULL; |
c47ed219 | 3048 | struct class_interface *class_intf; |
b8530017 | 3049 | unsigned int noio_flag; |
1da177e4 | 3050 | |
3451a495 | 3051 | device_lock(dev); |
00289cd8 | 3052 | kill_device(dev); |
3451a495 AD |
3053 | device_unlock(dev); |
3054 | ||
372a67c0 SK |
3055 | if (dev->fwnode && dev->fwnode->dev == dev) |
3056 | dev->fwnode->dev = NULL; | |
3057 | ||
ec0676ee AS |
3058 | /* Notify clients of device removal. This call must come |
3059 | * before dpm_sysfs_remove(). | |
3060 | */ | |
b8530017 | 3061 | noio_flag = memalloc_noio_save(); |
ec0676ee AS |
3062 | if (dev->bus) |
3063 | blocking_notifier_call_chain(&dev->bus->p->bus_notifier, | |
3064 | BUS_NOTIFY_DEL_DEVICE, dev); | |
9ed98953 | 3065 | |
3b98aeaf | 3066 | dpm_sysfs_remove(dev); |
1da177e4 | 3067 | if (parent) |
f791b8c8 | 3068 | klist_del(&dev->p->knode_parent); |
e105b8bf | 3069 | if (MAJOR(dev->devt)) { |
2b2af54a | 3070 | devtmpfs_delete_node(dev); |
e105b8bf | 3071 | device_remove_sys_dev_entry(dev); |
c5e064a6 | 3072 | device_remove_file(dev, &dev_attr_dev); |
e105b8bf | 3073 | } |
b9d9c82b | 3074 | if (dev->class) { |
da231fd5 | 3075 | device_remove_class_symlinks(dev); |
99ef3ef8 | 3076 | |
ca22e56d | 3077 | mutex_lock(&dev->class->p->mutex); |
c47ed219 | 3078 | /* notify any interfaces that the device is now gone */ |
184f1f77 | 3079 | list_for_each_entry(class_intf, |
ca22e56d | 3080 | &dev->class->p->interfaces, node) |
c47ed219 GKH |
3081 | if (class_intf->remove_dev) |
3082 | class_intf->remove_dev(dev, class_intf); | |
3083 | /* remove the device from the class list */ | |
570d0200 | 3084 | klist_del(&dev->p->knode_class); |
ca22e56d | 3085 | mutex_unlock(&dev->class->p->mutex); |
b9d9c82b | 3086 | } |
c5e064a6 | 3087 | device_remove_file(dev, &dev_attr_uevent); |
2620efef | 3088 | device_remove_attrs(dev); |
28953533 | 3089 | bus_remove_device(dev); |
4b6d1f12 | 3090 | device_pm_remove(dev); |
d1c3414c | 3091 | driver_deferred_probe_del(dev); |
07de0e86 | 3092 | device_platform_notify(dev, KOBJ_REMOVE); |
478573c9 | 3093 | device_remove_properties(dev); |
2ec16150 | 3094 | device_links_purge(dev); |
1da177e4 | 3095 | |
599bad38 JR |
3096 | if (dev->bus) |
3097 | blocking_notifier_call_chain(&dev->bus->p->bus_notifier, | |
3098 | BUS_NOTIFY_REMOVED_DEVICE, dev); | |
312c004d | 3099 | kobject_uevent(&dev->kobj, KOBJ_REMOVE); |
cebf8fd1 | 3100 | glue_dir = get_glue_dir(dev); |
1da177e4 | 3101 | kobject_del(&dev->kobj); |
cebf8fd1 | 3102 | cleanup_glue_dir(dev, glue_dir); |
b8530017 | 3103 | memalloc_noio_restore(noio_flag); |
da231fd5 | 3104 | put_device(parent); |
1da177e4 | 3105 | } |
86df2687 | 3106 | EXPORT_SYMBOL_GPL(device_del); |
1da177e4 LT |
3107 | |
3108 | /** | |
4a3ad20c GKH |
3109 | * device_unregister - unregister device from system. |
3110 | * @dev: device going away. | |
1da177e4 | 3111 | * |
4a3ad20c GKH |
3112 | * We do this in two parts, like we do device_register(). First, |
3113 | * we remove it from all the subsystems with device_del(), then | |
3114 | * we decrement the reference count via put_device(). If that | |
3115 | * is the final reference count, the device will be cleaned up | |
3116 | * via device_release() above. Otherwise, the structure will | |
3117 | * stick around until the final reference to the device is dropped. | |
1da177e4 | 3118 | */ |
4a3ad20c | 3119 | void device_unregister(struct device *dev) |
1da177e4 | 3120 | { |
1e0b2cf9 | 3121 | pr_debug("device: '%s': %s\n", dev_name(dev), __func__); |
1da177e4 LT |
3122 | device_del(dev); |
3123 | put_device(dev); | |
3124 | } | |
86df2687 | 3125 | EXPORT_SYMBOL_GPL(device_unregister); |
1da177e4 | 3126 | |
3d060aeb AS |
3127 | static struct device *prev_device(struct klist_iter *i) |
3128 | { | |
3129 | struct klist_node *n = klist_prev(i); | |
3130 | struct device *dev = NULL; | |
3131 | struct device_private *p; | |
3132 | ||
3133 | if (n) { | |
3134 | p = to_device_private_parent(n); | |
3135 | dev = p->device; | |
3136 | } | |
3137 | return dev; | |
3138 | } | |
3139 | ||
4a3ad20c | 3140 | static struct device *next_device(struct klist_iter *i) |
36239577 | 3141 | { |
4a3ad20c | 3142 | struct klist_node *n = klist_next(i); |
f791b8c8 GKH |
3143 | struct device *dev = NULL; |
3144 | struct device_private *p; | |
3145 | ||
3146 | if (n) { | |
3147 | p = to_device_private_parent(n); | |
3148 | dev = p->device; | |
3149 | } | |
3150 | return dev; | |
36239577 PM |
3151 | } |
3152 | ||
6fcf53ac | 3153 | /** |
e454cea2 | 3154 | * device_get_devnode - path of device node file |
6fcf53ac | 3155 | * @dev: device |
e454cea2 | 3156 | * @mode: returned file access mode |
3c2670e6 KS |
3157 | * @uid: returned file owner |
3158 | * @gid: returned file group | |
6fcf53ac KS |
3159 | * @tmp: possibly allocated string |
3160 | * | |
3161 | * Return the relative path of a possible device node. | |
3162 | * Non-default names may need to allocate memory to compose |
3163 | * a name. This memory is returned in tmp and needs to be | |
3164 | * freed by the caller. | |
3165 | */ | |
e454cea2 | 3166 | const char *device_get_devnode(struct device *dev, |
4e4098a3 | 3167 | umode_t *mode, kuid_t *uid, kgid_t *gid, |
3c2670e6 | 3168 | const char **tmp) |
6fcf53ac KS |
3169 | { |
3170 | char *s; | |
3171 | ||
3172 | *tmp = NULL; | |
3173 | ||
3174 | /* the device type may provide a specific name */ | |
e454cea2 | 3175 | if (dev->type && dev->type->devnode) |
3c2670e6 | 3176 | *tmp = dev->type->devnode(dev, mode, uid, gid); |
6fcf53ac KS |
3177 | if (*tmp) |
3178 | return *tmp; | |
3179 | ||
3180 | /* the class may provide a specific name */ | |
e454cea2 KS |
3181 | if (dev->class && dev->class->devnode) |
3182 | *tmp = dev->class->devnode(dev, mode); | |
6fcf53ac KS |
3183 | if (*tmp) |
3184 | return *tmp; | |
3185 | ||
3186 | /* return name without allocation, tmp == NULL */ | |
3187 | if (strchr(dev_name(dev), '!') == NULL) | |
3188 | return dev_name(dev); | |
3189 | ||
3190 | /* replace '!' in the name with '/' */ | |
a29fd614 RV |
3191 | s = kstrdup(dev_name(dev), GFP_KERNEL); |
3192 | if (!s) | |
6fcf53ac | 3193 | return NULL; |
a29fd614 RV |
3194 | strreplace(s, '!', '/'); |
3195 | return *tmp = s; | |
6fcf53ac KS |
3196 | } |
3197 | ||
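A hedged sketch of the @tmp contract described above. device_get_devnode() is not exported, so real callers live inside the driver core (uevent, devtmpfs); show_devnode() below is purely illustrative.

	#include <linux/device.h>
	#include <linux/printk.h>
	#include <linux/slab.h>
	#include <linux/uidgid.h>

	static void show_devnode(struct device *dev)
	{
		const char *tmp = NULL;
		const char *name;
		umode_t mode = 0;
		kuid_t uid = GLOBAL_ROOT_UID;
		kgid_t gid = GLOBAL_ROOT_GID;

		name = device_get_devnode(dev, &mode, &uid, &gid, &tmp);
		if (name)
			pr_info("device node path: %s\n", name);
		kfree(tmp);	/* NULL unless a name had to be composed */
	}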
1da177e4 | 3198 | /** |
4a3ad20c GKH |
3199 | * device_for_each_child - device child iterator. |
3200 | * @parent: parent struct device. | |
4a3ad20c | 3201 | * @fn: function to be called for each device. |
f8878dcb | 3202 | * @data: data for the callback. |
1da177e4 | 3203 | * |
4a3ad20c GKH |
3204 | * Iterate over @parent's child devices, and call @fn for each, |
3205 | * passing it @data. | |
1da177e4 | 3206 | * |
4a3ad20c GKH |
3207 | * We check the return of @fn each time. If it returns anything |
3208 | * other than 0, we break out and return that value. | |
1da177e4 | 3209 | */ |
4a3ad20c GKH |
3210 | int device_for_each_child(struct device *parent, void *data, |
3211 | int (*fn)(struct device *dev, void *data)) | |
1da177e4 | 3212 | { |
36239577 | 3213 | struct klist_iter i; |
4a3ad20c | 3214 | struct device *child; |
1da177e4 LT |
3215 | int error = 0; |
3216 | ||
014c90db GKH |
3217 | if (!parent->p) |
3218 | return 0; | |
3219 | ||
f791b8c8 | 3220 | klist_iter_init(&parent->p->klist_children, &i); |
93ead7c9 | 3221 | while (!error && (child = next_device(&i))) |
36239577 PM |
3222 | error = fn(child, data); |
3223 | klist_iter_exit(&i); | |
1da177e4 LT |
3224 | return error; |
3225 | } | |
86df2687 | 3226 | EXPORT_SYMBOL_GPL(device_for_each_child); |
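A small sketch of the callback contract just described (a non-zero return stops the walk); the foo_* names are hypothetical.

	#include <linux/device.h>

	static int foo_count_one(struct device *child, void *data)
	{
		unsigned int *count = data;

		(*count)++;
		return 0;	/* returning non-zero would stop the iteration */
	}

	static unsigned int foo_count_children(struct device *parent)
	{
		unsigned int count = 0;

		device_for_each_child(parent, &count, foo_count_one);
		return count;
	}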
1da177e4 | 3227 | |
3d060aeb AS |
3228 | /** |
3229 | * device_for_each_child_reverse - device child iterator in reversed order. | |
3230 | * @parent: parent struct device. | |
3231 | * @fn: function to be called for each device. | |
3232 | * @data: data for the callback. | |
3233 | * | |
3234 | * Iterate over @parent's child devices, and call @fn for each, | |
3235 | * passing it @data. | |
3236 | * | |
3237 | * We check the return of @fn each time. If it returns anything | |
3238 | * other than 0, we break out and return that value. | |
3239 | */ | |
3240 | int device_for_each_child_reverse(struct device *parent, void *data, | |
3241 | int (*fn)(struct device *dev, void *data)) | |
3242 | { | |
3243 | struct klist_iter i; | |
3244 | struct device *child; | |
3245 | int error = 0; | |
3246 | ||
3247 | if (!parent->p) | |
3248 | return 0; | |
3249 | ||
3250 | klist_iter_init(&parent->p->klist_children, &i); | |
3251 | while ((child = prev_device(&i)) && !error) | |
3252 | error = fn(child, data); | |
3253 | klist_iter_exit(&i); | |
3254 | return error; | |
3255 | } | |
3256 | EXPORT_SYMBOL_GPL(device_for_each_child_reverse); | |
3257 | ||
5ab69981 CH |
3258 | /** |
3259 | * device_find_child - device iterator for locating a particular device. | |
3260 | * @parent: parent struct device | |
5ab69981 | 3261 | * @match: Callback function to check device |
f8878dcb | 3262 | * @data: Data to pass to match function |
5ab69981 CH |
3263 | * |
3264 | * This is similar to the device_for_each_child() function above, but it | |
3265 | * returns a reference to a device that is 'found' for later use, as | |
3266 | * determined by the @match callback. | |
3267 | * | |
3268 | * The callback should return 0 if the device doesn't match and non-zero | |
3269 | * if it does. If the callback returns non-zero and a reference to the | |
3270 | * current device can be obtained, this function will return to the caller | |
3271 | * and not iterate over any more devices. | |
a4e2400a FV |
3272 | * |
3273 | * NOTE: you will need to drop the reference with put_device() after use. | |
5ab69981 | 3274 | */ |
4a3ad20c GKH |
3275 | struct device *device_find_child(struct device *parent, void *data, |
3276 | int (*match)(struct device *dev, void *data)) | |
5ab69981 CH |
3277 | { |
3278 | struct klist_iter i; | |
3279 | struct device *child; | |
3280 | ||
3281 | if (!parent) | |
3282 | return NULL; | |
3283 | ||
f791b8c8 | 3284 | klist_iter_init(&parent->p->klist_children, &i); |
5ab69981 CH |
3285 | while ((child = next_device(&i))) |
3286 | if (match(child, data) && get_device(child)) | |
3287 | break; | |
3288 | klist_iter_exit(&i); | |
3289 | return child; | |
3290 | } | |
86df2687 | 3291 | EXPORT_SYMBOL_GPL(device_find_child); |
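A sketch of a @match callback, assuming hypothetical foo_port driver data; note that the reference taken by the lookup is handed back to the caller.

	#include <linux/device.h>

	struct foo_port {
		unsigned int id;
	};

	static int foo_match_port(struct device *child, void *data)
	{
		const unsigned int *wanted = data;
		struct foo_port *port = dev_get_drvdata(child);

		return port && port->id == *wanted;
	}

	static struct device *foo_lookup_port(struct device *parent, unsigned int id)
	{
		/* the returned device is referenced; drop it with put_device() */
		return device_find_child(parent, &id, foo_match_port);
	}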
5ab69981 | 3292 | |
dad9bb01 HK |
3293 | /** |
3294 | * device_find_child_by_name - device iterator for locating a child device. | |
3295 | * @parent: parent struct device | |
3296 | * @name: name of the child device | |
3297 | * | |
3298 | * This is similar to the device_find_child() function above, but it | |
3299 | * returns a reference to a device that has the name @name. | |
3300 | * | |
3301 | * NOTE: you will need to drop the reference with put_device() after use. | |
3302 | */ | |
3303 | struct device *device_find_child_by_name(struct device *parent, | |
3304 | const char *name) | |
3305 | { | |
3306 | struct klist_iter i; | |
3307 | struct device *child; | |
3308 | ||
3309 | if (!parent) | |
3310 | return NULL; | |
3311 | ||
3312 | klist_iter_init(&parent->p->klist_children, &i); | |
3313 | while ((child = next_device(&i))) | |
c77f520d | 3314 | if (sysfs_streq(dev_name(child), name) && get_device(child)) |
dad9bb01 HK |
3315 | break; |
3316 | klist_iter_exit(&i); | |
3317 | return child; | |
3318 | } | |
3319 | EXPORT_SYMBOL_GPL(device_find_child_by_name); | |
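The same idea keyed on the child's name; "port0" is an illustrative name, not something this file defines.

	static void foo_poke_port0(struct device *parent)
	{
		struct device *child;

		child = device_find_child_by_name(parent, "port0");
		if (!child)
			return;

		dev_info(child, "found by name\n");
		put_device(child);	/* drop the reference taken by the lookup */
	}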
3320 | ||
1da177e4 LT |
3321 | int __init devices_init(void) |
3322 | { | |
881c6cfd GKH |
3323 | devices_kset = kset_create_and_add("devices", &device_uevent_ops, NULL); |
3324 | if (!devices_kset) | |
3325 | return -ENOMEM; | |
e105b8bf DW |
3326 | dev_kobj = kobject_create_and_add("dev", NULL); |
3327 | if (!dev_kobj) | |
3328 | goto dev_kobj_err; | |
3329 | sysfs_dev_block_kobj = kobject_create_and_add("block", dev_kobj); | |
3330 | if (!sysfs_dev_block_kobj) | |
3331 | goto block_kobj_err; | |
3332 | sysfs_dev_char_kobj = kobject_create_and_add("char", dev_kobj); | |
3333 | if (!sysfs_dev_char_kobj) | |
3334 | goto char_kobj_err; | |
3335 | ||
881c6cfd | 3336 | return 0; |
e105b8bf DW |
3337 | |
3338 | char_kobj_err: | |
3339 | kobject_put(sysfs_dev_block_kobj); | |
3340 | block_kobj_err: | |
3341 | kobject_put(dev_kobj); | |
3342 | dev_kobj_err: | |
3343 | kset_unregister(devices_kset); | |
3344 | return -ENOMEM; | |
1da177e4 LT |
3345 | } |
3346 | ||
4f3549d7 RW |
3347 | static int device_check_offline(struct device *dev, void *not_used) |
3348 | { | |
3349 | int ret; | |
3350 | ||
3351 | ret = device_for_each_child(dev, NULL, device_check_offline); | |
3352 | if (ret) | |
3353 | return ret; | |
3354 | ||
3355 | return device_supports_offline(dev) && !dev->offline ? -EBUSY : 0; | |
3356 | } | |
3357 | ||
3358 | /** | |
3359 | * device_offline - Prepare the device for hot-removal. | |
3360 | * @dev: Device to be put offline. | |
3361 | * | |
3362 | * Execute the device bus type's .offline() callback, if present, to prepare | |
3363 | * the device for a subsequent hot-removal. If that succeeds, the device must | |
3364 | * not be used until either it is removed or its bus type's .online() callback | |
3365 | * is executed. | |
3366 | * | |
3367 | * Call under device_hotplug_lock. | |
3368 | */ | |
3369 | int device_offline(struct device *dev) | |
3370 | { | |
3371 | int ret; | |
3372 | ||
3373 | if (dev->offline_disabled) | |
3374 | return -EPERM; | |
3375 | ||
3376 | ret = device_for_each_child(dev, NULL, device_check_offline); | |
3377 | if (ret) | |
3378 | return ret; | |
3379 | ||
3380 | device_lock(dev); | |
3381 | if (device_supports_offline(dev)) { | |
3382 | if (dev->offline) { | |
3383 | ret = 1; | |
3384 | } else { | |
3385 | ret = dev->bus->offline(dev); | |
3386 | if (!ret) { | |
3387 | kobject_uevent(&dev->kobj, KOBJ_OFFLINE); | |
3388 | dev->offline = true; | |
3389 | } | |
3390 | } | |
3391 | } | |
3392 | device_unlock(dev); | |
3393 | ||
3394 | return ret; | |
3395 | } | |
3396 | ||
3397 | /** | |
3398 | * device_online - Put the device back online after successful device_offline(). | |
3399 | * @dev: Device to be put back online. | |
3400 | * | |
3401 | * If device_offline() has been successfully executed for @dev, but the device | |
3402 | * has not been removed subsequently, execute its bus type's .online() callback | |
3403 | * to indicate that the device can be used again. | |
3404 | * | |
3405 | * Call under device_hotplug_lock. | |
3406 | */ | |
3407 | int device_online(struct device *dev) | |
3408 | { | |
3409 | int ret = 0; | |
3410 | ||
3411 | device_lock(dev); | |
3412 | if (device_supports_offline(dev)) { | |
3413 | if (dev->offline) { | |
3414 | ret = dev->bus->online(dev); | |
3415 | if (!ret) { | |
3416 | kobject_uevent(&dev->kobj, KOBJ_ONLINE); | |
3417 | dev->offline = false; | |
3418 | } | |
3419 | } else { | |
3420 | ret = 1; | |
3421 | } | |
3422 | } | |
3423 | device_unlock(dev); | |
3424 | ||
3425 | return ret; | |
3426 | } | |
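A sketch of the locking rule both kerneldocs above state: device_offline() and device_online() are called with device_hotplug_lock held, as a memory or CPU hot-remove style caller would do. The foo_* wrapper is hypothetical.

	#include <linux/device.h>

	static int foo_cycle_offline(struct device *dev)
	{
		int ret;

		lock_device_hotplug();
		ret = device_offline(dev);	/* 1 means it was already offline */
		if (ret < 0)
			goto out;
		ret = device_online(dev);
	out:
		unlock_device_hotplug();
		return ret;
	}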
3427 | ||
7f100d15 | 3428 | struct root_device { |
0aa0dc41 MM |
3429 | struct device dev; |
3430 | struct module *owner; | |
3431 | }; | |
3432 | ||
93058424 | 3433 | static inline struct root_device *to_root_device(struct device *d) |
481e2079 FW |
3434 | { |
3435 | return container_of(d, struct root_device, dev); | |
3436 | } | |
0aa0dc41 MM |
3437 | |
3438 | static void root_device_release(struct device *dev) | |
3439 | { | |
3440 | kfree(to_root_device(dev)); | |
3441 | } | |
3442 | ||
3443 | /** | |
3444 | * __root_device_register - allocate and register a root device | |
3445 | * @name: root device name | |
3446 | * @owner: owner module of the root device, usually THIS_MODULE | |
3447 | * | |
3448 | * This function allocates a root device and registers it | |
3449 | * using device_register(). In order to free the returned | |
3450 | * device, use root_device_unregister(). | |
3451 | * | |
3452 | * Root devices are dummy devices which allow other devices | |
3453 | * to be grouped under /sys/devices. Use this function to | |
3454 | * allocate a root device and then use it as the parent of | |
3455 | * any device which should appear under /sys/devices/{name} | |
3456 | * | |
3457 | * The /sys/devices/{name} directory will also contain a | |
3458 | * 'module' symlink which points to the @owner directory | |
3459 | * in sysfs. | |
3460 | * | |
f0eae0ed JN |
3461 | * Returns &struct device pointer on success, or ERR_PTR() on error. |
3462 | * | |
0aa0dc41 MM |
3463 | * Note: You probably want to use root_device_register(). |
3464 | */ | |
3465 | struct device *__root_device_register(const char *name, struct module *owner) | |
3466 | { | |
3467 | struct root_device *root; | |
3468 | int err = -ENOMEM; | |
3469 | ||
3470 | root = kzalloc(sizeof(struct root_device), GFP_KERNEL); | |
3471 | if (!root) | |
3472 | return ERR_PTR(err); | |
3473 | ||
acc0e90f | 3474 | err = dev_set_name(&root->dev, "%s", name); |
0aa0dc41 MM |
3475 | if (err) { |
3476 | kfree(root); | |
3477 | return ERR_PTR(err); | |
3478 | } | |
3479 | ||
3480 | root->dev.release = root_device_release; | |
3481 | ||
3482 | err = device_register(&root->dev); | |
3483 | if (err) { | |
3484 | put_device(&root->dev); | |
3485 | return ERR_PTR(err); | |
3486 | } | |
3487 | ||
1d9e882b | 3488 | #ifdef CONFIG_MODULES /* gotta find a "cleaner" way to do this */ |
0aa0dc41 MM |
3489 | if (owner) { |
3490 | struct module_kobject *mk = &owner->mkobj; | |
3491 | ||
3492 | err = sysfs_create_link(&root->dev.kobj, &mk->kobj, "module"); | |
3493 | if (err) { | |
3494 | device_unregister(&root->dev); | |
3495 | return ERR_PTR(err); | |
3496 | } | |
3497 | root->owner = owner; | |
3498 | } | |
3499 | #endif | |
3500 | ||
3501 | return &root->dev; | |
3502 | } | |
3503 | EXPORT_SYMBOL_GPL(__root_device_register); | |
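A sketch of the grouping use case described above; the "bar" name is made up, and root_device_register() is the THIS_MODULE-supplying wrapper mentioned in the note.

	#include <linux/device.h>
	#include <linux/err.h>
	#include <linux/init.h>
	#include <linux/module.h>

	static struct device *bar_root;

	static int __init bar_init(void)
	{
		/* becomes the parent for devices grouped under /sys/devices/bar */
		bar_root = root_device_register("bar");
		return PTR_ERR_OR_ZERO(bar_root);
	}

	static void __exit bar_exit(void)
	{
		root_device_unregister(bar_root);
	}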
3504 | ||
3505 | /** | |
3506 | * root_device_unregister - unregister and free a root device | |
7cbcf225 | 3507 | * @dev: device going away |
0aa0dc41 MM |
3508 | * |
3509 | * This function unregisters and cleans up a device that was created by | |
3510 | * root_device_register(). | |
3511 | */ | |
3512 | void root_device_unregister(struct device *dev) | |
3513 | { | |
3514 | struct root_device *root = to_root_device(dev); | |
3515 | ||
3516 | if (root->owner) | |
3517 | sysfs_remove_link(&root->dev.kobj, "module"); | |
3518 | ||
3519 | device_unregister(dev); | |
3520 | } | |
3521 | EXPORT_SYMBOL_GPL(root_device_unregister); | |
3522 | ||
23681e47 GKH |
3523 | |
3524 | static void device_create_release(struct device *dev) | |
3525 | { | |
1e0b2cf9 | 3526 | pr_debug("device: '%s': %s\n", dev_name(dev), __func__); |
23681e47 GKH |
3527 | kfree(dev); |
3528 | } | |
3529 | ||
6a8b55d7 | 3530 | static __printf(6, 0) struct device * |
39ef3112 GR |
3531 | device_create_groups_vargs(struct class *class, struct device *parent, |
3532 | dev_t devt, void *drvdata, | |
3533 | const struct attribute_group **groups, | |
3534 | const char *fmt, va_list args) | |
23681e47 | 3535 | { |
23681e47 GKH |
3536 | struct device *dev = NULL; |
3537 | int retval = -ENODEV; | |
3538 | ||
3539 | if (class == NULL || IS_ERR(class)) | |
3540 | goto error; | |
23681e47 GKH |
3541 | |
3542 | dev = kzalloc(sizeof(*dev), GFP_KERNEL); | |
3543 | if (!dev) { | |
3544 | retval = -ENOMEM; | |
3545 | goto error; | |
3546 | } | |
3547 | ||
bbc780f8 | 3548 | device_initialize(dev); |
23681e47 GKH |
3549 | dev->devt = devt; |
3550 | dev->class = class; | |
3551 | dev->parent = parent; | |
39ef3112 | 3552 | dev->groups = groups; |
23681e47 | 3553 | dev->release = device_create_release; |
8882b394 | 3554 | dev_set_drvdata(dev, drvdata); |
23681e47 | 3555 | |
1fa5ae85 KS |
3556 | retval = kobject_set_name_vargs(&dev->kobj, fmt, args); |
3557 | if (retval) | |
3558 | goto error; | |
3559 | ||
bbc780f8 | 3560 | retval = device_add(dev); |
23681e47 GKH |
3561 | if (retval) |
3562 | goto error; | |
3563 | ||
23681e47 GKH |
3564 | return dev; |
3565 | ||
3566 | error: | |
286661b3 | 3567 | put_device(dev); |
23681e47 GKH |
3568 | return ERR_PTR(retval); |
3569 | } | |
39ef3112 | 3570 | |
8882b394 | 3571 | /** |
4e106739 | 3572 | * device_create - creates a device and registers it with sysfs |
8882b394 GKH |
3573 | * @class: pointer to the struct class that this device should be registered to |
3574 | * @parent: pointer to the parent struct device of this new device, if any | |
3575 | * @devt: the dev_t for the char device to be added | |
3576 | * @drvdata: the data to be added to the device for callbacks | |
3577 | * @fmt: string for the device's name | |
3578 | * | |
3579 | * This function can be used by char device classes. A struct device | |
3580 | * will be created in sysfs, registered to the specified class. | |
3581 | * | |
3582 | * A "dev" file will be created, showing the dev_t for the device, if | |
3583 | * the dev_t is not 0,0. | |
3584 | * If a pointer to a parent struct device is passed in, the newly created | |
3585 | * struct device will be a child of that device in sysfs. | |
3586 | * The pointer to the struct device will be returned from the call. | |
3587 | * Any further sysfs files that might be required can be created using this | |
3588 | * pointer. | |
3589 | * | |
f0eae0ed JN |
3590 | * Returns &struct device pointer on success, or ERR_PTR() on error. |
3591 | * | |
8882b394 GKH |
3592 | * Note: the struct class passed to this function must have previously |
3593 | * been created with a call to class_create(). | |
3594 | */ | |
4e106739 GKH |
3595 | struct device *device_create(struct class *class, struct device *parent, |
3596 | dev_t devt, void *drvdata, const char *fmt, ...) | |
8882b394 GKH |
3597 | { |
3598 | va_list vargs; | |
3599 | struct device *dev; | |
3600 | ||
3601 | va_start(vargs, fmt); | |
4c747466 CH |
3602 | dev = device_create_groups_vargs(class, parent, devt, drvdata, NULL, |
3603 | fmt, vargs); | |
8882b394 GKH |
3604 | va_end(vargs); |
3605 | return dev; | |
3606 | } | |
4e106739 | 3607 | EXPORT_SYMBOL_GPL(device_create); |
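The usual char-device pattern this helper serves, sketched with made-up "foo" names; class_create() is assumed to take the module owner argument used by kernels of this vintage.

	#include <linux/device.h>
	#include <linux/err.h>
	#include <linux/fs.h>
	#include <linux/init.h>
	#include <linux/module.h>

	static struct class *foo_class;
	static dev_t foo_devt;

	static int __init foo_init(void)
	{
		struct device *dev;
		int err;

		err = alloc_chrdev_region(&foo_devt, 0, 1, "foo");
		if (err)
			return err;

		foo_class = class_create(THIS_MODULE, "foo");
		if (IS_ERR(foo_class)) {
			err = PTR_ERR(foo_class);
			goto out_chrdev;
		}

		/* /sys/class/foo/foo0 appears, and devtmpfs creates /dev/foo0 */
		dev = device_create(foo_class, NULL, foo_devt, NULL, "foo%d", 0);
		if (IS_ERR(dev)) {
			err = PTR_ERR(dev);
			goto out_class;
		}
		return 0;

	out_class:
		class_destroy(foo_class);
	out_chrdev:
		unregister_chrdev_region(foo_devt, 1);
		return err;
	}

	static void __exit foo_exit(void)
	{
		device_destroy(foo_class, foo_devt);
		class_destroy(foo_class);
		unregister_chrdev_region(foo_devt, 1);
	}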
8882b394 | 3608 | |
39ef3112 GR |
3609 | /** |
3610 | * device_create_with_groups - creates a device and registers it with sysfs | |
3611 | * @class: pointer to the struct class that this device should be registered to | |
3612 | * @parent: pointer to the parent struct device of this new device, if any | |
3613 | * @devt: the dev_t for the char device to be added | |
3614 | * @drvdata: the data to be added to the device for callbacks | |
3615 | * @groups: NULL-terminated list of attribute groups to be created | |
3616 | * @fmt: string for the device's name | |
3617 | * | |
3618 | * This function can be used by char device classes. A struct device | |
3619 | * will be created in sysfs, registered to the specified class. | |
3620 | * Additional attributes specified in the groups parameter will also | |
3621 | * be created automatically. | |
3622 | * | |
3623 | * A "dev" file will be created, showing the dev_t for the device, if | |
3624 | * the dev_t is not 0,0. | |
3625 | * If a pointer to a parent struct device is passed in, the newly created | |
3626 | * struct device will be a child of that device in sysfs. | |
3627 | * The pointer to the struct device will be returned from the call. | |
3628 | * Any further sysfs files that might be required can be created using this | |
3629 | * pointer. | |
3630 | * | |
3631 | * Returns &struct device pointer on success, or ERR_PTR() on error. | |
3632 | * | |
3633 | * Note: the struct class passed to this function must have previously | |
3634 | * been created with a call to class_create(). | |
3635 | */ | |
3636 | struct device *device_create_with_groups(struct class *class, | |
3637 | struct device *parent, dev_t devt, | |
3638 | void *drvdata, | |
3639 | const struct attribute_group **groups, | |
3640 | const char *fmt, ...) | |
3641 | { | |
3642 | va_list vargs; | |
3643 | struct device *dev; | |
3644 | ||
3645 | va_start(vargs, fmt); | |
3646 | dev = device_create_groups_vargs(class, parent, devt, drvdata, groups, | |
3647 | fmt, vargs); | |
3648 | va_end(vargs); | |
3649 | return dev; | |
3650 | } | |
3651 | EXPORT_SYMBOL_GPL(device_create_with_groups); | |
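A sketch of why the @groups argument matters: the hypothetical "label" attribute below is created before the KOBJ_ADD uevent fires, so udev never sees the device without it. sysfs_emit() is assumed to be available in this kernel.

	#include <linux/device.h>
	#include <linux/kdev_t.h>
	#include <linux/sysfs.h>

	static ssize_t label_show(struct device *dev,
				  struct device_attribute *attr, char *buf)
	{
		return sysfs_emit(buf, "%s\n", (const char *)dev_get_drvdata(dev));
	}
	static DEVICE_ATTR_RO(label);

	static struct attribute *foo_attrs[] = {
		&dev_attr_label.attr,
		NULL
	};
	ATTRIBUTE_GROUPS(foo);

	static struct device *foo_add_dev(struct class *cls, dev_t devt, char *label)
	{
		/* label string is stashed as drvdata and shown by label_show() */
		return device_create_with_groups(cls, NULL, devt, label,
						 foo_groups, "foo%u", MINOR(devt));
	}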
3652 | ||
775b64d2 RW |
3653 | /** |
3654 | * device_destroy - removes a device that was created with device_create() | |
3655 | * @class: pointer to the struct class that this device was registered with | |
3656 | * @devt: the dev_t of the device that was previously registered | |
3657 | * | |
3658 | * This call unregisters and cleans up a device that was created with a | |
3659 | * call to device_create(). | |
3660 | */ | |
3661 | void device_destroy(struct class *class, dev_t devt) | |
3662 | { | |
3663 | struct device *dev; | |
23681e47 | 3664 | |
4495dfdd | 3665 | dev = class_find_device_by_devt(class, devt); |
cd35449b DY |
3666 | if (dev) { |
3667 | put_device(dev); | |
23681e47 | 3668 | device_unregister(dev); |
cd35449b | 3669 | } |
23681e47 GKH |
3670 | } |
3671 | EXPORT_SYMBOL_GPL(device_destroy); | |
a2de48ca GKH |
3672 | |
3673 | /** | |
3674 | * device_rename - renames a device | |
3675 | * @dev: the pointer to the struct device to be renamed | |
3676 | * @new_name: the new name of the device | |
030c1d2b EB |
3677 | * |
3678 | * It is the responsibility of the caller to provide mutual | |
3679 | * exclusion between two different calls of device_rename | |
3680 | * on the same device to ensure that new_name is valid and | |
3681 | * won't conflict with other devices. | |
c6c0ac66 | 3682 | * |
a5462516 TT |
3683 | * Note: Don't call this function. Currently, the networking layer calls this |
3684 | * function, but that will change. The following text from Kay Sievers offers | |
3685 | * some insight: | |
3686 | * | |
3687 | * Renaming devices is racy at many levels, symlinks and other stuff are not | |
3688 | * replaced atomically, and you get a "move" uevent, but it's not easy to | |
3689 | * connect the event to the old and new device. Device nodes are not renamed at | |
3690 | * all, there isn't even support for that in the kernel now. | |
3691 | * | |
3692 | * In the meantime, during renaming, your target name might be taken by another | |
3693 | * driver, creating conflicts. Or the old name is taken directly after you | |
3694 | * renamed it -- then you get events for the same DEVPATH, before you even see | |
3695 | * the "move" event. It's just a mess, and nothing new should ever rely on | |
3696 | * kernel device renaming. Besides that, it's not even implemented now for | |
3697 | * other things than (driver-core wise very simple) network devices. | |
3698 | * | |
3699 | * We are currently about to change network renaming in udev to completely | |
3700 | * disallow renaming of devices in the same namespace as the kernel uses, | |
3701 | * because we can't solve the problems properly, that arise with swapping names | |
3702 | * of multiple interfaces without races. Means, renaming of eth[0-9]* will only | |
3703 | * be allowed to some other name than eth[0-9]*, for the aforementioned | |
3704 | * reasons. | |
3705 | * | |
3706 | * Make up a "real" name in the driver before you register anything, or add | |
3707 | * some other attributes for userspace to find the device, or use udev to add | |
3708 | * symlinks -- but never rename kernel devices later, it's a complete mess. We | |
3709 | * don't even want to get into that and try to implement the missing pieces in | |
3710 | * the core. We really have other pieces to fix in the driver core mess. :) | |
a2de48ca | 3711 | */ |
6937e8f8 | 3712 | int device_rename(struct device *dev, const char *new_name) |
a2de48ca | 3713 | { |
4b30ee58 | 3714 | struct kobject *kobj = &dev->kobj; |
2ee97caf | 3715 | char *old_device_name = NULL; |
a2de48ca GKH |
3716 | int error; |
3717 | ||
3718 | dev = get_device(dev); | |
3719 | if (!dev) | |
3720 | return -EINVAL; | |
3721 | ||
69df7533 | 3722 | dev_dbg(dev, "renaming to %s\n", new_name); |
a2de48ca | 3723 | |
1fa5ae85 | 3724 | old_device_name = kstrdup(dev_name(dev), GFP_KERNEL); |
2ee97caf CH |
3725 | if (!old_device_name) { |
3726 | error = -ENOMEM; | |
3727 | goto out; | |
a2de48ca | 3728 | } |
a2de48ca | 3729 | |
f349cf34 | 3730 | if (dev->class) { |
4b30ee58 TH |
3731 | error = sysfs_rename_link_ns(&dev->class->p->subsys.kobj, |
3732 | kobj, old_device_name, | |
3733 | new_name, kobject_namespace(kobj)); | |
f349cf34 EB |
3734 | if (error) |
3735 | goto out; | |
3736 | } | |
39aba963 | 3737 | |
4b30ee58 | 3738 | error = kobject_rename(kobj, new_name); |
1fa5ae85 | 3739 | if (error) |
2ee97caf | 3740 | goto out; |
a2de48ca | 3741 | |
2ee97caf | 3742 | out: |
a2de48ca GKH |
3743 | put_device(dev); |
3744 | ||
2ee97caf | 3745 | kfree(old_device_name); |
a2de48ca GKH |
3746 | |
3747 | return error; | |
3748 | } | |
a2807dbc | 3749 | EXPORT_SYMBOL_GPL(device_rename); |
8a82472f CH |
3750 | |
3751 | static int device_move_class_links(struct device *dev, | |
3752 | struct device *old_parent, | |
3753 | struct device *new_parent) | |
3754 | { | |
f7f3461d | 3755 | int error = 0; |
8a82472f | 3756 | |
f7f3461d GKH |
3757 | if (old_parent) |
3758 | sysfs_remove_link(&dev->kobj, "device"); | |
3759 | if (new_parent) | |
3760 | error = sysfs_create_link(&dev->kobj, &new_parent->kobj, | |
3761 | "device"); | |
3762 | return error; | |
8a82472f CH |
3763 | } |
3764 | ||
3765 | /** | |
3766 | * device_move - moves a device to a new parent | |
3767 | * @dev: the pointer to the struct device to be moved | |
13509860 | 3768 | * @new_parent: the new parent of the device (can be NULL) |
ffa6a705 | 3769 | * @dpm_order: how to reorder the dpm_list |
8a82472f | 3770 | */ |
ffa6a705 CH |
3771 | int device_move(struct device *dev, struct device *new_parent, |
3772 | enum dpm_order dpm_order) | |
8a82472f CH |
3773 | { |
3774 | int error; | |
3775 | struct device *old_parent; | |
c744aeae | 3776 | struct kobject *new_parent_kobj; |
8a82472f CH |
3777 | |
3778 | dev = get_device(dev); | |
3779 | if (!dev) | |
3780 | return -EINVAL; | |
3781 | ||
ffa6a705 | 3782 | device_pm_lock(); |
8a82472f | 3783 | new_parent = get_device(new_parent); |
4a3ad20c | 3784 | new_parent_kobj = get_device_parent(dev, new_parent); |
84d0c27d TH |
3785 | if (IS_ERR(new_parent_kobj)) { |
3786 | error = PTR_ERR(new_parent_kobj); | |
3787 | put_device(new_parent); | |
3788 | goto out; | |
3789 | } | |
63b6971a | 3790 | |
1e0b2cf9 KS |
3791 | pr_debug("device: '%s': %s: moving to '%s'\n", dev_name(dev), |
3792 | __func__, new_parent ? dev_name(new_parent) : "<NULL>"); | |
c744aeae | 3793 | error = kobject_move(&dev->kobj, new_parent_kobj); |
8a82472f | 3794 | if (error) { |
63b6971a | 3795 | cleanup_glue_dir(dev, new_parent_kobj); |
8a82472f CH |
3796 | put_device(new_parent); |
3797 | goto out; | |
3798 | } | |
3799 | old_parent = dev->parent; | |
3800 | dev->parent = new_parent; | |
3801 | if (old_parent) | |
f791b8c8 | 3802 | klist_remove(&dev->p->knode_parent); |
0d358f22 | 3803 | if (new_parent) { |
f791b8c8 GKH |
3804 | klist_add_tail(&dev->p->knode_parent, |
3805 | &new_parent->p->klist_children); | |
0d358f22 YL |
3806 | set_dev_node(dev, dev_to_node(new_parent)); |
3807 | } | |
3808 | ||
bdd4034d RV |
3809 | if (dev->class) { |
3810 | error = device_move_class_links(dev, old_parent, new_parent); | |
3811 | if (error) { | |
3812 | /* We ignore errors on cleanup since we're hosed anyway... */ | |
3813 | device_move_class_links(dev, new_parent, old_parent); | |
3814 | if (!kobject_move(&dev->kobj, &old_parent->kobj)) { | |
3815 | if (new_parent) | |
3816 | klist_remove(&dev->p->knode_parent); | |
3817 | dev->parent = old_parent; | |
3818 | if (old_parent) { | |
3819 | klist_add_tail(&dev->p->knode_parent, | |
3820 | &old_parent->p->klist_children); | |
3821 | set_dev_node(dev, dev_to_node(old_parent)); | |
3822 | } | |
0d358f22 | 3823 | } |
bdd4034d RV |
3824 | cleanup_glue_dir(dev, new_parent_kobj); |
3825 | put_device(new_parent); | |
3826 | goto out; | |
8a82472f | 3827 | } |
8a82472f | 3828 | } |
ffa6a705 CH |
3829 | switch (dpm_order) { |
3830 | case DPM_ORDER_NONE: | |
3831 | break; | |
3832 | case DPM_ORDER_DEV_AFTER_PARENT: | |
3833 | device_pm_move_after(dev, new_parent); | |
52cdbdd4 | 3834 | devices_kset_move_after(dev, new_parent); |
ffa6a705 CH |
3835 | break; |
3836 | case DPM_ORDER_PARENT_BEFORE_DEV: | |
3837 | device_pm_move_before(new_parent, dev); | |
52cdbdd4 | 3838 | devices_kset_move_before(new_parent, dev); |
ffa6a705 CH |
3839 | break; |
3840 | case DPM_ORDER_DEV_LAST: | |
3841 | device_pm_move_last(dev); | |
52cdbdd4 | 3842 | devices_kset_move_last(dev); |
ffa6a705 CH |
3843 | break; |
3844 | } | |
bdd4034d | 3845 | |
8a82472f CH |
3846 | put_device(old_parent); |
3847 | out: | |
ffa6a705 | 3848 | device_pm_unlock(); |
8a82472f CH |
3849 | put_device(dev); |
3850 | return error; | |
3851 | } | |
8a82472f | 3852 | EXPORT_SYMBOL_GPL(device_move); |
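A one-line sketch of the @dpm_order choice: when a device is reparented under its supplier (the typical case), keeping it after the new parent in the dpm_list preserves suspend/resume ordering. The foo_* wrapper is hypothetical.

	static int foo_reparent(struct device *dev, struct device *new_parent)
	{
		return device_move(dev, new_parent, DPM_ORDER_DEV_AFTER_PARENT);
	}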
37b0c020 | 3853 | |
b8f33e5d CB |
3854 | static int device_attrs_change_owner(struct device *dev, kuid_t kuid, |
3855 | kgid_t kgid) | |
3856 | { | |
3857 | struct kobject *kobj = &dev->kobj; | |
3858 | struct class *class = dev->class; | |
3859 | const struct device_type *type = dev->type; | |
3860 | int error; | |
3861 | ||
3862 | if (class) { | |
3863 | /* | |
3864 | * Change the device groups of the device class for @dev to | |
3865 | * @kuid/@kgid. | |
3866 | */ | |
3867 | error = sysfs_groups_change_owner(kobj, class->dev_groups, kuid, | |
3868 | kgid); | |
3869 | if (error) | |
3870 | return error; | |
3871 | } | |
3872 | ||
3873 | if (type) { | |
3874 | /* | |
3875 | * Change the device groups of the device type for @dev to | |
3876 | * @kuid/@kgid. | |
3877 | */ | |
3878 | error = sysfs_groups_change_owner(kobj, type->groups, kuid, | |
3879 | kgid); | |
3880 | if (error) | |
3881 | return error; | |
3882 | } | |
3883 | ||
3884 | /* Change the device groups of @dev to @kuid/@kgid. */ | |
3885 | error = sysfs_groups_change_owner(kobj, dev->groups, kuid, kgid); | |
3886 | if (error) | |
3887 | return error; | |
3888 | ||
3889 | if (device_supports_offline(dev) && !dev->offline_disabled) { | |
3890 | /* Change online device attributes of @dev to @kuid/@kgid. */ | |
3891 | error = sysfs_file_change_owner(kobj, dev_attr_online.attr.name, | |
3892 | kuid, kgid); | |
3893 | if (error) | |
3894 | return error; | |
3895 | } | |
3896 | ||
3897 | return 0; | |
3898 | } | |
3899 | ||
3900 | /** | |
3901 | * device_change_owner - change the owner of an existing device. | |
3902 | * @dev: device. | |
3903 | * @kuid: new owner's kuid | |
3904 | * @kgid: new owner's kgid | |
3905 | * | |
3906 | * This changes the owner of @dev and its corresponding sysfs entries to | |
3907 | * @kuid/@kgid. This function closely mirrors how @dev was added via driver | |
3908 | * core. | |
3909 | * | |
3910 | * Returns 0 on success or error code on failure. | |
3911 | */ | |
3912 | int device_change_owner(struct device *dev, kuid_t kuid, kgid_t kgid) | |
3913 | { | |
3914 | int error; | |
3915 | struct kobject *kobj = &dev->kobj; | |
3916 | ||
3917 | dev = get_device(dev); | |
3918 | if (!dev) | |
3919 | return -EINVAL; | |
3920 | ||
3921 | /* | |
3922 | * Change the kobject and the default attributes and groups of the | |
3923 | * ktype associated with it to @kuid/@kgid. | |
3924 | */ | |
3925 | error = sysfs_change_owner(kobj, kuid, kgid); | |
3926 | if (error) | |
3927 | goto out; | |
3928 | ||
3929 | /* | |
3930 | * Change the uevent file for @dev to the new owner. The uevent file | |
3931 | * was created in a separate step when @dev got added and we mirror | |
3932 | * that step here. | |
3933 | */ | |
3934 | error = sysfs_file_change_owner(kobj, dev_attr_uevent.attr.name, kuid, | |
3935 | kgid); | |
3936 | if (error) | |
3937 | goto out; | |
3938 | ||
3939 | /* | |
3940 | * Change the device groups, the device groups associated with the | |
3941 | * device class, and the groups associated with the device type of @dev | |
3942 | * to @kuid/@kgid. | |
3943 | */ | |
3944 | error = device_attrs_change_owner(dev, kuid, kgid); | |
3945 | if (error) | |
3946 | goto out; | |
3947 | ||
3b52fc5d CB |
3948 | error = dpm_sysfs_change_owner(dev, kuid, kgid); |
3949 | if (error) | |
3950 | goto out; | |
3951 | ||
b8f33e5d CB |
3952 | #ifdef CONFIG_BLOCK |
3953 | if (sysfs_deprecated && dev->class == &block_class) | |
3954 | goto out; | |
3955 | #endif | |
3956 | ||
3957 | /* | |
3958 | * Change the owner of the symlink located in the class directory of | |
3959 | * the device class associated with @dev which points to the actual | |
3960 | * directory entry for @dev to @kuid/@kgid. This ensures that the | |
3961 | * symlink shows the same permissions as its target. | |
3962 | */ | |
3963 | error = sysfs_link_change_owner(&dev->class->p->subsys.kobj, &dev->kobj, | |
3964 | dev_name(dev), kuid, kgid); | |
3965 | if (error) | |
3966 | goto out; | |
3967 | ||
3968 | out: | |
3969 | put_device(dev); | |
3970 | return error; | |
3971 | } | |
3972 | EXPORT_SYMBOL_GPL(device_change_owner); | |
3973 | ||
37b0c020 GKH |
3974 | /** |
3975 | * device_shutdown - call ->shutdown() on each device to shutdown. | |
3976 | */ | |
3977 | void device_shutdown(void) | |
3978 | { | |
f123db8e | 3979 | struct device *dev, *parent; |
6245838f | 3980 | |
3297c8fc PL |
3981 | wait_for_device_probe(); |
3982 | device_block_probing(); | |
3983 | ||
65650b35 RW |
3984 | cpufreq_suspend(); |
3985 | ||
6245838f HD |
3986 | spin_lock(&devices_kset->list_lock); |
3987 | /* | |
3988 | * Walk the devices list backward, shutting down each in turn. | |
3989 | * Beware that device unplug events may also start pulling | |
3990 | * devices offline, even as the system is shutting down. | |
3991 | */ | |
3992 | while (!list_empty(&devices_kset->list)) { | |
3993 | dev = list_entry(devices_kset->list.prev, struct device, | |
3994 | kobj.entry); | |
d1c6c030 ML |
3995 | |
3996 | /* | |
3997 | * Hold a reference to the device's parent so it |
3998 | * cannot be freed while we still need to take the |
3999 | * parent's lock below. |
4000 | */ | |
f123db8e | 4001 | parent = get_device(dev->parent); |
6245838f HD |
4002 | get_device(dev); |
4003 | /* | |
4004 | * Make sure the device is off the kset list, in the | |
4005 | * event that dev->*->shutdown() doesn't remove it. | |
4006 | */ | |
4007 | list_del_init(&dev->kobj.entry); | |
4008 | spin_unlock(&devices_kset->list_lock); | |
fe6b91f4 | 4009 | |
d1c6c030 | 4010 | /* hold lock to avoid race with probe/release */ |
f123db8e BL |
4011 | if (parent) |
4012 | device_lock(parent); | |
d1c6c030 ML |
4013 | device_lock(dev); |
4014 | ||
fe6b91f4 AS |
4015 | /* Don't allow any more runtime suspends */ |
4016 | pm_runtime_get_noresume(dev); | |
4017 | pm_runtime_barrier(dev); | |
37b0c020 | 4018 | |
7521621e | 4019 | if (dev->class && dev->class->shutdown_pre) { |
f77af151 | 4020 | if (initcall_debug) |
7521621e MS |
4021 | dev_info(dev, "shutdown_pre\n"); |
4022 | dev->class->shutdown_pre(dev); | |
4023 | } | |
4024 | if (dev->bus && dev->bus->shutdown) { | |
0246c4fa SL |
4025 | if (initcall_debug) |
4026 | dev_info(dev, "shutdown\n"); | |
37b0c020 GKH |
4027 | dev->bus->shutdown(dev); |
4028 | } else if (dev->driver && dev->driver->shutdown) { | |
0246c4fa SL |
4029 | if (initcall_debug) |
4030 | dev_info(dev, "shutdown\n"); | |
37b0c020 GKH |
4031 | dev->driver->shutdown(dev); |
4032 | } | |
d1c6c030 ML |
4033 | |
4034 | device_unlock(dev); | |
f123db8e BL |
4035 | if (parent) |
4036 | device_unlock(parent); | |
d1c6c030 | 4037 | |
6245838f | 4038 | put_device(dev); |
f123db8e | 4039 | put_device(parent); |
6245838f HD |
4040 | |
4041 | spin_lock(&devices_kset->list_lock); | |
37b0c020 | 4042 | } |
6245838f | 4043 | spin_unlock(&devices_kset->list_lock); |
37b0c020 | 4044 | } |
99bcf217 JP |
4045 | |
4046 | /* | |
4047 | * Device logging functions | |
4048 | */ | |
4049 | ||
4050 | #ifdef CONFIG_PRINTK | |
74caba7f JO |
4051 | static void |
4052 | set_dev_info(const struct device *dev, struct dev_printk_info *dev_info) | |
99bcf217 | 4053 | { |
c4e00daa | 4054 | const char *subsys; |
74caba7f JO |
4055 | |
4056 | memset(dev_info, 0, sizeof(*dev_info)); | |
99bcf217 | 4057 | |
c4e00daa KS |
4058 | if (dev->class) |
4059 | subsys = dev->class->name; | |
4060 | else if (dev->bus) | |
4061 | subsys = dev->bus->name; | |
4062 | else | |
74caba7f | 4063 | return; |
c4e00daa | 4064 | |
74caba7f | 4065 | strscpy(dev_info->subsystem, subsys, sizeof(dev_info->subsystem)); |
c4e00daa KS |
4066 | |
4067 | /* | |
4068 | * Add device identifier DEVICE=: | |
4069 | * b12:8 block dev_t | |
4070 | * c127:3 char dev_t | |
4071 | * n8 netdev ifindex | |
4072 | * +sound:card0 subsystem:devname | |
4073 | */ | |
4074 | if (MAJOR(dev->devt)) { | |
4075 | char c; | |
4076 | ||
4077 | if (strcmp(subsys, "block") == 0) | |
4078 | c = 'b'; | |
4079 | else | |
4080 | c = 'c'; | |
74caba7f JO |
4081 | |
4082 | snprintf(dev_info->device, sizeof(dev_info->device), | |
4083 | "%c%u:%u", c, MAJOR(dev->devt), MINOR(dev->devt)); | |
c4e00daa KS |
4084 | } else if (strcmp(subsys, "net") == 0) { |
4085 | struct net_device *net = to_net_dev(dev); | |
4086 | ||
74caba7f JO |
4087 | snprintf(dev_info->device, sizeof(dev_info->device), |
4088 | "n%u", net->ifindex); | |
c4e00daa | 4089 | } else { |
74caba7f JO |
4090 | snprintf(dev_info->device, sizeof(dev_info->device), |
4091 | "+%s:%s", subsys, dev_name(dev)); | |
c4e00daa | 4092 | } |
798efc60 | 4093 | } |
798efc60 | 4094 | |
05e4e5b8 JP |
4095 | int dev_vprintk_emit(int level, const struct device *dev, |
4096 | const char *fmt, va_list args) | |
4097 | { | |
74caba7f | 4098 | struct dev_printk_info dev_info; |
05e4e5b8 | 4099 | |
74caba7f | 4100 | set_dev_info(dev, &dev_info); |
05e4e5b8 | 4101 | |
74caba7f | 4102 | return vprintk_emit(0, level, &dev_info, fmt, args); |
05e4e5b8 JP |
4103 | } |
4104 | EXPORT_SYMBOL(dev_vprintk_emit); | |
4105 | ||
4106 | int dev_printk_emit(int level, const struct device *dev, const char *fmt, ...) | |
4107 | { | |
4108 | va_list args; | |
4109 | int r; | |
4110 | ||
4111 | va_start(args, fmt); | |
4112 | ||
4113 | r = dev_vprintk_emit(level, dev, fmt, args); | |
4114 | ||
4115 | va_end(args); | |
4116 | ||
4117 | return r; | |
4118 | } | |
4119 | EXPORT_SYMBOL(dev_printk_emit); | |
4120 | ||
d1f1052c | 4121 | static void __dev_printk(const char *level, const struct device *dev, |
798efc60 JP |
4122 | struct va_format *vaf) |
4123 | { | |
d1f1052c JP |
4124 | if (dev) |
4125 | dev_printk_emit(level[1] - '0', dev, "%s %s: %pV", | |
4126 | dev_driver_string(dev), dev_name(dev), vaf); | |
4127 | else | |
4128 | printk("%s(NULL device *): %pV", level, vaf); | |
99bcf217 JP |
4129 | } |
4130 | ||
d1f1052c JP |
4131 | void dev_printk(const char *level, const struct device *dev, |
4132 | const char *fmt, ...) | |
99bcf217 JP |
4133 | { |
4134 | struct va_format vaf; | |
4135 | va_list args; | |
99bcf217 JP |
4136 | |
4137 | va_start(args, fmt); | |
4138 | ||
4139 | vaf.fmt = fmt; | |
4140 | vaf.va = &args; | |
4141 | ||
d1f1052c | 4142 | __dev_printk(level, dev, &vaf); |
798efc60 | 4143 | |
99bcf217 | 4144 | va_end(args); |
99bcf217 JP |
4145 | } |
4146 | EXPORT_SYMBOL(dev_printk); | |
4147 | ||
4148 | #define define_dev_printk_level(func, kern_level) \ | |
d1f1052c | 4149 | void func(const struct device *dev, const char *fmt, ...) \ |
99bcf217 JP |
4150 | { \ |
4151 | struct va_format vaf; \ | |
4152 | va_list args; \ | |
99bcf217 JP |
4153 | \ |
4154 | va_start(args, fmt); \ | |
4155 | \ | |
4156 | vaf.fmt = fmt; \ | |
4157 | vaf.va = &args; \ | |
4158 | \ | |
d1f1052c | 4159 | __dev_printk(kern_level, dev, &vaf); \ |
798efc60 | 4160 | \ |
99bcf217 | 4161 | va_end(args); \ |
99bcf217 JP |
4162 | } \ |
4163 | EXPORT_SYMBOL(func); | |
4164 | ||
663336ee JP |
4165 | define_dev_printk_level(_dev_emerg, KERN_EMERG); |
4166 | define_dev_printk_level(_dev_alert, KERN_ALERT); | |
4167 | define_dev_printk_level(_dev_crit, KERN_CRIT); | |
4168 | define_dev_printk_level(_dev_err, KERN_ERR); | |
4169 | define_dev_printk_level(_dev_warn, KERN_WARNING); | |
4170 | define_dev_printk_level(_dev_notice, KERN_NOTICE); | |
99bcf217 JP |
4171 | define_dev_printk_level(_dev_info, KERN_INFO); |
4172 | ||
4173 | #endif | |
97badf87 | 4174 | |
a787e540 AH |
4175 | /** |
4176 | * dev_err_probe - probe error check and log helper | |
4177 | * @dev: the pointer to the struct device | |
4178 | * @err: error value to test | |
4179 | * @fmt: printf-style format string | |
4180 | * @...: arguments as specified in the format string | |
4181 | * | |
4182 | * This helper implements a common pattern found in probe functions for error |
4183 | * checking: print a debug or error message depending on whether the error value |
4184 | * is -EPROBE_DEFER, and propagate the error upwards. |
d090b70e AH |
4185 | * In case of -EPROBE_DEFER it also sets the defer probe reason, which can be |
4186 | * checked later by reading the devices_deferred debugfs attribute. |
074b3aad MCC |
4187 | * It replaces the following code sequence:: |
4188 | * | |
a787e540 AH |
4189 | * if (err != -EPROBE_DEFER) |
4190 | * dev_err(dev, ...); | |
4191 | * else | |
4192 | * dev_dbg(dev, ...); | |
4193 | * return err; | |
074b3aad MCC |
4194 | * |
4195 | * with:: | |
4196 | * | |
a787e540 AH |
4197 | * return dev_err_probe(dev, err, ...); |
4198 | * | |
4199 | * Returns @err. | |
4200 | * | |
4201 | */ | |
4202 | int dev_err_probe(const struct device *dev, int err, const char *fmt, ...) | |
4203 | { | |
4204 | struct va_format vaf; | |
4205 | va_list args; | |
4206 | ||
4207 | va_start(args, fmt); | |
4208 | vaf.fmt = fmt; | |
4209 | vaf.va = &args; | |
4210 | ||
d090b70e | 4211 | if (err != -EPROBE_DEFER) { |
693a8e93 | 4212 | dev_err(dev, "error %pe: %pV", ERR_PTR(err), &vaf); |
d090b70e AH |
4213 | } else { |
4214 | device_set_deferred_probe_reason(dev, &vaf); | |
693a8e93 | 4215 | dev_dbg(dev, "error %pe: %pV", ERR_PTR(err), &vaf); |
d090b70e | 4216 | } |
a787e540 AH |
4217 | |
4218 | va_end(args); | |
4219 | ||
4220 | return err; | |
4221 | } | |
4222 | EXPORT_SYMBOL_GPL(dev_err_probe); | |
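A concrete probe-time sketch of the pattern above; devm_clk_get() merely stands in for any resource lookup that may return -EPROBE_DEFER, and foo_probe() is hypothetical.

	#include <linux/clk.h>
	#include <linux/err.h>
	#include <linux/platform_device.h>

	static int foo_probe(struct platform_device *pdev)
	{
		struct clk *clk;

		clk = devm_clk_get(&pdev->dev, NULL);
		if (IS_ERR(clk))
			return dev_err_probe(&pdev->dev, PTR_ERR(clk),
					     "failed to get clock\n");

		return 0;
	}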
4223 | ||
97badf87 RW |
4224 | static inline bool fwnode_is_primary(struct fwnode_handle *fwnode) |
4225 | { | |
4226 | return fwnode && !IS_ERR(fwnode->secondary); | |
4227 | } | |
4228 | ||
4229 | /** | |
4230 | * set_primary_fwnode - Change the primary firmware node of a given device. | |
4231 | * @dev: Device to handle. | |
4232 | * @fwnode: New primary firmware node of the device. | |
4233 | * | |
4234 | * Set the device's firmware node pointer to @fwnode, but if a secondary | |
4235 | * firmware node of the device is present, preserve it. | |
4236 | */ | |
4237 | void set_primary_fwnode(struct device *dev, struct fwnode_handle *fwnode) | |
4238 | { | |
99aed922 | 4239 | struct device *parent = dev->parent; |
c15e1bdd | 4240 | struct fwnode_handle *fn = dev->fwnode; |
97badf87 | 4241 | |
c15e1bdd | 4242 | if (fwnode) { |
97badf87 RW |
4243 | if (fwnode_is_primary(fn)) |
4244 | fn = fn->secondary; | |
4245 | ||
55f89a8a MW |
4246 | if (fn) { |
4247 | WARN_ON(fwnode->secondary); | |
4248 | fwnode->secondary = fn; | |
4249 | } | |
97badf87 RW |
4250 | dev->fwnode = fwnode; |
4251 | } else { | |
c15e1bdd HK |
4252 | if (fwnode_is_primary(fn)) { |
4253 | dev->fwnode = fn->secondary; | |
99aed922 AS |
4254 | if (!(parent && fn == parent->fwnode)) |
4255 | fn->secondary = ERR_PTR(-ENODEV); | |
c15e1bdd HK |
4256 | } else { |
4257 | dev->fwnode = NULL; | |
4258 | } | |
97badf87 RW |
4259 | } |
4260 | } | |
4261 | EXPORT_SYMBOL_GPL(set_primary_fwnode); | |
4262 | ||
4263 | /** | |
4264 | * set_secondary_fwnode - Change the secondary firmware node of a given device. | |
4265 | * @dev: Device to handle. | |
4266 | * @fwnode: New secondary firmware node of the device. | |
4267 | * | |
4268 | * If a primary firmware node of the device is present, set its secondary | |
4269 | * pointer to @fwnode. Otherwise, set the device's firmware node pointer to | |
4270 | * @fwnode. | |
4271 | */ | |
4272 | void set_secondary_fwnode(struct device *dev, struct fwnode_handle *fwnode) | |
4273 | { | |
4274 | if (fwnode) | |
4275 | fwnode->secondary = ERR_PTR(-ENODEV); | |
4276 | ||
4277 | if (fwnode_is_primary(dev->fwnode)) | |
4278 | dev->fwnode->secondary = fwnode; | |
4279 | else | |
4280 | dev->fwnode = fwnode; | |
4281 | } | |
96489ae1 | 4282 | EXPORT_SYMBOL_GPL(set_secondary_fwnode); |
4e75e1d7 JH |
4283 | |
4284 | /** | |
4285 | * device_set_of_node_from_dev - reuse device-tree node of another device | |
4286 | * @dev: device whose device-tree node is being set | |
4287 | * @dev2: device whose device-tree node is being reused | |
4288 | * | |
4289 | * Takes another reference to the new device-tree node after first dropping | |
4290 | * any reference held to the old node. | |
4291 | */ | |
4292 | void device_set_of_node_from_dev(struct device *dev, const struct device *dev2) | |
4293 | { | |
4294 | of_node_put(dev->of_node); | |
4295 | dev->of_node = of_node_get(dev2->of_node); | |
4296 | dev->of_node_reused = true; | |
4297 | } | |
4298 | EXPORT_SYMBOL_GPL(device_set_of_node_from_dev); | |
65b66682 | 4299 | |
6cda08a2 SP |
4300 | int device_match_name(struct device *dev, const void *name) |
4301 | { | |
4302 | return sysfs_streq(dev_name(dev), name); | |
4303 | } | |
4304 | EXPORT_SYMBOL_GPL(device_match_name); | |
4305 | ||
65b66682 SP |
4306 | int device_match_of_node(struct device *dev, const void *np) |
4307 | { | |
4308 | return dev->of_node == np; | |
4309 | } | |
4310 | EXPORT_SYMBOL_GPL(device_match_of_node); | |
67843bba SP |
4311 | |
4312 | int device_match_fwnode(struct device *dev, const void *fwnode) | |
4313 | { | |
4314 | return dev_fwnode(dev) == fwnode; | |
4315 | } | |
4316 | EXPORT_SYMBOL_GPL(device_match_fwnode); | |
4495dfdd SP |
4317 | |
4318 | int device_match_devt(struct device *dev, const void *pdevt) | |
4319 | { | |
4320 | return dev->devt == *(dev_t *)pdevt; | |
4321 | } | |
4322 | EXPORT_SYMBOL_GPL(device_match_devt); | |
00500147 SP |
4323 | |
4324 | int device_match_acpi_dev(struct device *dev, const void *adev) | |
4325 | { | |
4326 | return ACPI_COMPANION(dev) == adev; | |
4327 | } | |
4328 | EXPORT_SYMBOL(device_match_acpi_dev); | |
6bf85ba9 SP |
4329 | |
4330 | int device_match_any(struct device *dev, const void *unused) | |
4331 | { | |
4332 | return 1; | |
4333 | } | |
4334 | EXPORT_SYMBOL_GPL(device_match_any); |
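These helpers are shaped to drop straight into bus_find_device(), class_find_device() and friends; a sketch looking a platform device up by its DT node, with a hypothetical foo_* wrapper:

	#include <linux/device.h>
	#include <linux/of.h>
	#include <linux/platform_device.h>

	static struct device *foo_dev_from_of_node(struct device_node *np)
	{
		/* the result is referenced; drop it with put_device() when done */
		return bus_find_device(&platform_bus_type, NULL, np,
				       device_match_of_node);
	}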