/*
 * Support for dynamic device trees.
 *
 * On some platforms, the device tree can be manipulated at runtime.
 * The routines in this section support adding, removing and changing
 * device tree nodes.
 */

#include <linux/of.h>
#include <linux/spinlock.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/proc_fs.h>

#include "of_private.h"

/**
 * of_node_get() - Increment refcount of a node
 * @node:	Node to inc refcount, NULL is supported to simplify writing of
 *		callers
 *
 * Returns node.
 */
struct device_node *of_node_get(struct device_node *node)
{
	if (node)
		kobject_get(&node->kobj);
	return node;
}
EXPORT_SYMBOL(of_node_get);

/**
 * of_node_put() - Decrement refcount of a node
 * @node:	Node to dec refcount, NULL is supported to simplify writing of
 *		callers
 */
void of_node_put(struct device_node *node)
{
	if (node)
		kobject_put(&node->kobj);
}
EXPORT_SYMBOL(of_node_put);
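
/*
 * Example (illustrative sketch, not part of this file's code): a typical
 * caller balances node references with of_node_put(). of_find_node_by_path()
 * already returns the node with its refcount incremented, so only the final
 * put is needed; the path used here is an assumption for illustration.
 *
 *	struct device_node *np;
 *
 *	np = of_find_node_by_path("/soc/i2c@3000");	// refcount taken
 *	if (np) {
 *		// ... use np ...
 *		of_node_put(np);			// drop the reference
 *	}
 */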

static void of_node_remove(struct device_node *np)
{
	struct property *pp;

	BUG_ON(!of_node_is_initialized(np));

	/* only remove properties if on sysfs */
	if (of_node_is_attached(np)) {
		for_each_property_of_node(np, pp)
			sysfs_remove_bin_file(&np->kobj, &pp->attr);
		kobject_del(&np->kobj);
	}

	/* finally remove the kobj_init ref */
	of_node_put(np);
}

static BLOCKING_NOTIFIER_HEAD(of_reconfig_chain);

int of_reconfig_notifier_register(struct notifier_block *nb)
{
	return blocking_notifier_chain_register(&of_reconfig_chain, nb);
}
EXPORT_SYMBOL_GPL(of_reconfig_notifier_register);

int of_reconfig_notifier_unregister(struct notifier_block *nb)
{
	return blocking_notifier_chain_unregister(&of_reconfig_chain, nb);
}
EXPORT_SYMBOL_GPL(of_reconfig_notifier_unregister);
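
/*
 * Example (sketch, "foo" names are assumptions): a driver receiving
 * reconfiguration events. For OF_RECONFIG_ATTACH_NODE/DETACH_NODE the
 * notifier argument is the struct device_node itself; for the property
 * actions it is a struct of_prop_reconfig (see of_property_notify() below).
 *
 *	static int foo_of_notify(struct notifier_block *nb,
 *				 unsigned long action, void *arg)
 *	{
 *		struct device_node *np;
 *		struct of_prop_reconfig *pr;
 *
 *		switch (action) {
 *		case OF_RECONFIG_ATTACH_NODE:
 *		case OF_RECONFIG_DETACH_NODE:
 *			np = arg;
 *			pr_info("node event on %s\n", np->full_name);
 *			break;
 *		case OF_RECONFIG_ADD_PROPERTY:
 *		case OF_RECONFIG_REMOVE_PROPERTY:
 *		case OF_RECONFIG_UPDATE_PROPERTY:
 *			pr = arg;
 *			pr_info("property %s on %s\n", pr->prop->name,
 *				pr->dn->full_name);
 *			break;
 *		}
 *		return NOTIFY_OK;
 *	}
 *
 *	static struct notifier_block foo_of_nb = {
 *		.notifier_call = foo_of_notify,
 *	};
 *
 *	// register with: of_reconfig_notifier_register(&foo_of_nb);
 *	// unregister with: of_reconfig_notifier_unregister(&foo_of_nb);
 */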

int of_reconfig_notify(unsigned long action, void *p)
{
	int rc;

	rc = blocking_notifier_call_chain(&of_reconfig_chain, action, p);
	return notifier_to_errno(rc);
}

int of_property_notify(int action, struct device_node *np,
		       struct property *prop)
{
	struct of_prop_reconfig pr;

	/* only call notifiers if the node is attached */
	if (!of_node_is_attached(np))
		return 0;

	pr.dn = np;
	pr.prop = prop;
	return of_reconfig_notify(action, &pr);
}

void __of_attach_node(struct device_node *np)
{
	np->sibling = np->parent->child;
	np->allnext = np->parent->allnext;
	np->parent->allnext = np;
	np->parent->child = np;
	of_node_clear_flag(np, OF_DETACHED);
}

/**
 * of_attach_node() - Plug a device node into the tree and global list.
 */
int of_attach_node(struct device_node *np)
{
	unsigned long flags;
	int rc;

	rc = of_reconfig_notify(OF_RECONFIG_ATTACH_NODE, np);
	if (rc)
		return rc;

	raw_spin_lock_irqsave(&devtree_lock, flags);
	__of_attach_node(np);
	raw_spin_unlock_irqrestore(&devtree_lock, flags);

	of_node_add(np);
	return 0;
}

void __of_detach_node(struct device_node *np)
{
	struct device_node *parent;

	if (WARN_ON(of_node_check_flag(np, OF_DETACHED)))
		return;

	parent = np->parent;
	if (WARN_ON(!parent))
		return;

	if (of_allnodes == np)
		of_allnodes = np->allnext;
	else {
		struct device_node *prev;
		for (prev = of_allnodes;
		     prev->allnext != np;
		     prev = prev->allnext)
			;
		prev->allnext = np->allnext;
	}

	if (parent->child == np)
		parent->child = np->sibling;
	else {
		struct device_node *prevsib;
		for (prevsib = np->parent->child;
		     prevsib->sibling != np;
		     prevsib = prevsib->sibling)
			;
		prevsib->sibling = np->sibling;
	}

	of_node_set_flag(np, OF_DETACHED);
}

/**
 * of_detach_node() - "Unplug" a node from the device tree.
 *
 * The caller must hold a reference to the node.  The memory associated with
 * the node is not freed until its refcount goes to zero.
 */
int of_detach_node(struct device_node *np)
{
	unsigned long flags;
	int rc = 0;

	rc = of_reconfig_notify(OF_RECONFIG_DETACH_NODE, np);
	if (rc)
		return rc;

	raw_spin_lock_irqsave(&devtree_lock, flags);
	__of_detach_node(np);
	raw_spin_unlock_irqrestore(&devtree_lock, flags);

	of_node_remove(np);
	return rc;
}
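
/*
 * Example (sketch, path is an assumption, error handling elided): detaching
 * a node. The caller holds its own reference across the call; the node
 * memory is only released once every reference has been dropped with
 * of_node_put().
 *
 *	struct device_node *np;
 *
 *	np = of_find_node_by_path("/soc/removable@0");	// takes a reference
 *	if (np) {
 *		of_detach_node(np);	// unplug from the tree and sysfs
 *		of_node_put(np);	// drop our reference
 *	}
 */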

/**
 * of_node_release() - release a dynamically allocated node
 * @kobj: kobject element of the node to be released
 *
 * This is the kobject release callback for device tree nodes; it is invoked
 * when the last reference is dropped via of_node_put().
 */
void of_node_release(struct kobject *kobj)
{
	struct device_node *node = kobj_to_device_node(kobj);
	struct property *prop = node->properties;

	/* We should never be releasing nodes that haven't been detached. */
	if (!of_node_check_flag(node, OF_DETACHED)) {
		pr_err("ERROR: Bad of_node_put() on %s\n", node->full_name);
		dump_stack();
		return;
	}

	if (!of_node_check_flag(node, OF_DYNAMIC))
		return;

	while (prop) {
		struct property *next = prop->next;
		kfree(prop->name);
		kfree(prop->value);
		kfree(prop);
		prop = next;

		if (!prop) {
			prop = node->deadprops;
			node->deadprops = NULL;
		}
	}
	kfree(node->full_name);
	kfree(node->data);
	kfree(node);
}

/**
 * __of_prop_dup - Copy a property dynamically.
 * @prop:	Property to copy
 * @allocflags:	Allocation flags (typically pass GFP_KERNEL)
 *
 * Copy a property by dynamically allocating the memory of both the
 * property structure and the property name & contents. The property's
 * flags have the OF_DYNAMIC bit set so that we can differentiate between
 * dynamically allocated properties and statically allocated ones.
 * Returns the newly allocated property or NULL on out of memory error.
 */
struct property *__of_prop_dup(const struct property *prop, gfp_t allocflags)
{
	struct property *new;

	new = kzalloc(sizeof(*new), allocflags);
	if (!new)
		return NULL;

	/*
	 * NOTE: There is no check for zero length value.
	 * In case of a boolean property, this will allocate a value
	 * of zero bytes. We do this to work around the use
	 * of of_get_property() calls on boolean values.
	 */
	new->name = kstrdup(prop->name, allocflags);
	new->value = kmemdup(prop->value, prop->length, allocflags);
	new->length = prop->length;
	if (!new->name || !new->value)
		goto err_free;

	/* mark the property as dynamic */
	of_property_set_flag(new, OF_DYNAMIC);

	return new;

err_free:
	kfree(new->name);
	kfree(new->value);
	kfree(new);
	return NULL;
}
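
/*
 * Example (sketch): duplicating an existing property so the copy can be
 * handed to another node. "np" and "template" stand for a node and property
 * the caller already has; of_add_property() is the existing helper in
 * drivers/of/base.c.
 *
 *	struct property *copy;
 *
 *	copy = __of_prop_dup(template, GFP_KERNEL);
 *	if (!copy)
 *		return -ENOMEM;
 *	of_add_property(np, copy);	// the copy is now owned by the tree
 */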

/**
 * __of_node_alloc() - Create an empty device node dynamically.
 * @full_name:	Full name of the new device node
 * @allocflags:	Allocation flags (typically pass GFP_KERNEL)
 *
 * Create an empty device tree node, suitable for further modification.
 * The node data are dynamically allocated and all the node flags
 * have the OF_DYNAMIC & OF_DETACHED bits set.
 * Returns the newly allocated node or NULL on out of memory error.
 */
struct device_node *__of_node_alloc(const char *full_name, gfp_t allocflags)
{
	struct device_node *node;

	node = kzalloc(sizeof(*node), allocflags);
	if (!node)
		return NULL;

	node->full_name = kstrdup(full_name, allocflags);
	of_node_set_flag(node, OF_DYNAMIC);
	of_node_set_flag(node, OF_DETACHED);
	if (!node->full_name)
		goto err_free;

	of_node_init(node);

	return node;

err_free:
	kfree(node->full_name);
	kfree(node);
	return NULL;
}
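
/*
 * Example (sketch, names and path are assumptions): building a node
 * dynamically and plugging it into the live tree. The parent pointer must be
 * set before of_attach_node(), since __of_attach_node() links the new node
 * through np->parent.
 *
 *	struct device_node *np;
 *
 *	np = __of_node_alloc("/testcase-data/newnode", GFP_KERNEL);
 *	if (!np)
 *		return -ENOMEM;
 *	np->parent = of_node_get(parent);	// parent obtained elsewhere
 *	of_attach_node(np);			// fires OF_RECONFIG_ATTACH_NODE
 */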