// SPDX-License-Identifier: GPL-2.0
/*
 * core.c - Implementation of core module of MOST Linux driver stack
 *
 * Copyright (C) 2013-2020 Microchip Technology Germany II GmbH & Co. KG
 */

#include <linux/module.h>
#include <linux/fs.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/device.h>
#include <linux/list.h>
#include <linux/poll.h>
#include <linux/wait.h>
#include <linux/kobject.h>
#include <linux/mutex.h>
#include <linux/completion.h>
#include <linux/sysfs.h>
#include <linux/kthread.h>
#include <linux/dma-mapping.h>
#include <linux/idr.h>
#include <linux/most.h>

#define MAX_CHANNELS	64
#define STRING_SIZE	80

static struct ida mdev_id;
static int dummy_num_buffers;
static struct list_head comp_list;

struct pipe {
	struct most_component *comp;
	int refs;
	int num_buffers;
};

struct most_channel {
	struct device dev;
	struct completion cleanup;
	atomic_t mbo_ref;
	atomic_t mbo_nq_level;
	u16 channel_id;
	char name[STRING_SIZE];
	bool is_poisoned;
	struct mutex start_mutex; /* channel activation synchronization */
	struct mutex nq_mutex; /* nq thread synchronization */
	int is_starving;
	struct most_interface *iface;
	struct most_channel_config cfg;
	bool keep_mbo;
	bool enqueue_halt;
	struct list_head fifo;
	spinlock_t fifo_lock; /* fifo access synchronization */
	struct list_head halt_fifo;
	struct list_head list;
	struct pipe pipe0;
	struct pipe pipe1;
	struct list_head trash_fifo;
	struct task_struct *hdm_enqueue_task;
	wait_queue_head_t hdm_fifo_wq;

};

#define to_channel(d) container_of(d, struct most_channel, dev)

struct interface_private {
	int dev_id;
	char name[STRING_SIZE];
	struct most_channel *channel[MAX_CHANNELS];
	struct list_head channel_list;
};

static const struct {
	int most_ch_data_type;
	const char *name;
} ch_data_type[] = {
	{ MOST_CH_CONTROL, "control" },
	{ MOST_CH_ASYNC, "async" },
	{ MOST_CH_SYNC, "sync" },
	{ MOST_CH_ISOC, "isoc"},
	{ MOST_CH_ISOC, "isoc_avp"},
};

/**
 * list_pop_mbo - retrieves the first MBO of the list and removes it
 * @ptr: the list head to grab the MBO from.
 */
#define list_pop_mbo(ptr)						\
({									\
	struct mbo *_mbo = list_first_entry(ptr, struct mbo, list);	\
	list_del(&_mbo->list);						\
	_mbo;								\
})

/**
 * most_free_mbo_coherent - free an MBO and its coherent buffer
 * @mbo: most buffer
 */
static void most_free_mbo_coherent(struct mbo *mbo)
{
	struct most_channel *c = mbo->context;
	u16 const coherent_buf_size = c->cfg.buffer_size + c->cfg.extra_len;

	if (c->iface->dma_free)
		c->iface->dma_free(mbo, coherent_buf_size);
	else
		kfree(mbo->virt_address);
	kfree(mbo);
	if (atomic_sub_and_test(1, &c->mbo_ref))
		complete(&c->cleanup);
}

/**
 * flush_channel_fifos - clear the channel fifos
 * @c: pointer to channel object
 */
static void flush_channel_fifos(struct most_channel *c)
{
	unsigned long flags, hf_flags;
	struct mbo *mbo, *tmp;

	if (list_empty(&c->fifo) && list_empty(&c->halt_fifo))
		return;

	spin_lock_irqsave(&c->fifo_lock, flags);
	list_for_each_entry_safe(mbo, tmp, &c->fifo, list) {
		list_del(&mbo->list);
		spin_unlock_irqrestore(&c->fifo_lock, flags);
		most_free_mbo_coherent(mbo);
		spin_lock_irqsave(&c->fifo_lock, flags);
	}
	spin_unlock_irqrestore(&c->fifo_lock, flags);

	spin_lock_irqsave(&c->fifo_lock, hf_flags);
	list_for_each_entry_safe(mbo, tmp, &c->halt_fifo, list) {
		list_del(&mbo->list);
		spin_unlock_irqrestore(&c->fifo_lock, hf_flags);
		most_free_mbo_coherent(mbo);
		spin_lock_irqsave(&c->fifo_lock, hf_flags);
	}
	spin_unlock_irqrestore(&c->fifo_lock, hf_flags);

	if (unlikely((!list_empty(&c->fifo) || !list_empty(&c->halt_fifo))))
		dev_warn(&c->dev, "Channel or trash fifo not empty\n");
}

/**
 * flush_trash_fifo - clear the trash fifo
 * @c: pointer to channel object
 */
static int flush_trash_fifo(struct most_channel *c)
{
	struct mbo *mbo, *tmp;
	unsigned long flags;

	spin_lock_irqsave(&c->fifo_lock, flags);
	list_for_each_entry_safe(mbo, tmp, &c->trash_fifo, list) {
		list_del(&mbo->list);
		spin_unlock_irqrestore(&c->fifo_lock, flags);
		most_free_mbo_coherent(mbo);
		spin_lock_irqsave(&c->fifo_lock, flags);
	}
	spin_unlock_irqrestore(&c->fifo_lock, flags);
	return 0;
}

static ssize_t available_directions_show(struct device *dev,
					 struct device_attribute *attr,
					 char *buf)
{
	struct most_channel *c = to_channel(dev);
	unsigned int i = c->channel_id;

	strcpy(buf, "");
	if (c->iface->channel_vector[i].direction & MOST_CH_RX)
		strcat(buf, "rx ");
	if (c->iface->channel_vector[i].direction & MOST_CH_TX)
		strcat(buf, "tx ");
	strcat(buf, "\n");
	return strlen(buf);
}

static ssize_t available_datatypes_show(struct device *dev,
					struct device_attribute *attr,
					char *buf)
{
	struct most_channel *c = to_channel(dev);
	unsigned int i = c->channel_id;

	strcpy(buf, "");
	if (c->iface->channel_vector[i].data_type & MOST_CH_CONTROL)
		strcat(buf, "control ");
	if (c->iface->channel_vector[i].data_type & MOST_CH_ASYNC)
		strcat(buf, "async ");
	if (c->iface->channel_vector[i].data_type & MOST_CH_SYNC)
		strcat(buf, "sync ");
	if (c->iface->channel_vector[i].data_type & MOST_CH_ISOC)
		strcat(buf, "isoc ");
	strcat(buf, "\n");
	return strlen(buf);
}

static ssize_t number_of_packet_buffers_show(struct device *dev,
					     struct device_attribute *attr,
					     char *buf)
{
	struct most_channel *c = to_channel(dev);
	unsigned int i = c->channel_id;

	return snprintf(buf, PAGE_SIZE, "%d\n",
			c->iface->channel_vector[i].num_buffers_packet);
}

static ssize_t number_of_stream_buffers_show(struct device *dev,
					     struct device_attribute *attr,
					     char *buf)
{
	struct most_channel *c = to_channel(dev);
	unsigned int i = c->channel_id;

	return snprintf(buf, PAGE_SIZE, "%d\n",
			c->iface->channel_vector[i].num_buffers_streaming);
}

static ssize_t size_of_packet_buffer_show(struct device *dev,
					  struct device_attribute *attr,
					  char *buf)
{
	struct most_channel *c = to_channel(dev);
	unsigned int i = c->channel_id;

	return snprintf(buf, PAGE_SIZE, "%d\n",
			c->iface->channel_vector[i].buffer_size_packet);
}

static ssize_t size_of_stream_buffer_show(struct device *dev,
					  struct device_attribute *attr,
					  char *buf)
{
	struct most_channel *c = to_channel(dev);
	unsigned int i = c->channel_id;

	return snprintf(buf, PAGE_SIZE, "%d\n",
			c->iface->channel_vector[i].buffer_size_streaming);
}

static ssize_t channel_starving_show(struct device *dev,
				     struct device_attribute *attr,
				     char *buf)
{
	struct most_channel *c = to_channel(dev);

	return snprintf(buf, PAGE_SIZE, "%d\n", c->is_starving);
}

static ssize_t set_number_of_buffers_show(struct device *dev,
					  struct device_attribute *attr,
					  char *buf)
{
	struct most_channel *c = to_channel(dev);

	return snprintf(buf, PAGE_SIZE, "%d\n", c->cfg.num_buffers);
}

static ssize_t set_buffer_size_show(struct device *dev,
				    struct device_attribute *attr,
				    char *buf)
{
	struct most_channel *c = to_channel(dev);

	return snprintf(buf, PAGE_SIZE, "%d\n", c->cfg.buffer_size);
}

static ssize_t set_direction_show(struct device *dev,
				  struct device_attribute *attr,
				  char *buf)
{
	struct most_channel *c = to_channel(dev);

	if (c->cfg.direction & MOST_CH_TX)
		return snprintf(buf, PAGE_SIZE, "tx\n");
	else if (c->cfg.direction & MOST_CH_RX)
		return snprintf(buf, PAGE_SIZE, "rx\n");
	return snprintf(buf, PAGE_SIZE, "unconfigured\n");
}

static ssize_t set_datatype_show(struct device *dev,
				 struct device_attribute *attr,
				 char *buf)
{
	int i;
	struct most_channel *c = to_channel(dev);

	for (i = 0; i < ARRAY_SIZE(ch_data_type); i++) {
		if (c->cfg.data_type & ch_data_type[i].most_ch_data_type)
			return snprintf(buf, PAGE_SIZE, "%s",
					ch_data_type[i].name);
	}
	return snprintf(buf, PAGE_SIZE, "unconfigured\n");
}

static ssize_t set_subbuffer_size_show(struct device *dev,
				       struct device_attribute *attr,
				       char *buf)
{
	struct most_channel *c = to_channel(dev);

	return snprintf(buf, PAGE_SIZE, "%d\n", c->cfg.subbuffer_size);
}

static ssize_t set_packets_per_xact_show(struct device *dev,
					 struct device_attribute *attr,
					 char *buf)
{
	struct most_channel *c = to_channel(dev);

	return snprintf(buf, PAGE_SIZE, "%d\n", c->cfg.packets_per_xact);
}

static ssize_t set_dbr_size_show(struct device *dev,
				 struct device_attribute *attr, char *buf)
{
	struct most_channel *c = to_channel(dev);

	return snprintf(buf, PAGE_SIZE, "%d\n", c->cfg.dbr_size);
}

#define to_dev_attr(a) container_of(a, struct device_attribute, attr)
static umode_t channel_attr_is_visible(struct kobject *kobj,
				       struct attribute *attr, int index)
{
	struct device_attribute *dev_attr = to_dev_attr(attr);
	struct device *dev = kobj_to_dev(kobj);
	struct most_channel *c = to_channel(dev);

	if (!strcmp(dev_attr->attr.name, "set_dbr_size") &&
	    (c->iface->interface != ITYPE_MEDIALB_DIM2))
		return 0;
	if (!strcmp(dev_attr->attr.name, "set_packets_per_xact") &&
	    (c->iface->interface != ITYPE_USB))
		return 0;

	return attr->mode;
}

#define DEV_ATTR(_name) (&dev_attr_##_name.attr)

static DEVICE_ATTR_RO(available_directions);
static DEVICE_ATTR_RO(available_datatypes);
static DEVICE_ATTR_RO(number_of_packet_buffers);
static DEVICE_ATTR_RO(number_of_stream_buffers);
static DEVICE_ATTR_RO(size_of_stream_buffer);
static DEVICE_ATTR_RO(size_of_packet_buffer);
static DEVICE_ATTR_RO(channel_starving);
static DEVICE_ATTR_RO(set_buffer_size);
static DEVICE_ATTR_RO(set_number_of_buffers);
static DEVICE_ATTR_RO(set_direction);
static DEVICE_ATTR_RO(set_datatype);
static DEVICE_ATTR_RO(set_subbuffer_size);
static DEVICE_ATTR_RO(set_packets_per_xact);
static DEVICE_ATTR_RO(set_dbr_size);

static struct attribute *channel_attrs[] = {
	DEV_ATTR(available_directions),
	DEV_ATTR(available_datatypes),
	DEV_ATTR(number_of_packet_buffers),
	DEV_ATTR(number_of_stream_buffers),
	DEV_ATTR(size_of_stream_buffer),
	DEV_ATTR(size_of_packet_buffer),
	DEV_ATTR(channel_starving),
	DEV_ATTR(set_buffer_size),
	DEV_ATTR(set_number_of_buffers),
	DEV_ATTR(set_direction),
	DEV_ATTR(set_datatype),
	DEV_ATTR(set_subbuffer_size),
	DEV_ATTR(set_packets_per_xact),
	DEV_ATTR(set_dbr_size),
	NULL,
};

static const struct attribute_group channel_attr_group = {
	.attrs = channel_attrs,
	.is_visible = channel_attr_is_visible,
};

static const struct attribute_group *channel_attr_groups[] = {
	&channel_attr_group,
	NULL,
};

static ssize_t description_show(struct device *dev,
				struct device_attribute *attr,
				char *buf)
{
	struct most_interface *iface = dev_get_drvdata(dev);

	return snprintf(buf, PAGE_SIZE, "%s\n", iface->description);
}

static ssize_t interface_show(struct device *dev,
			      struct device_attribute *attr,
			      char *buf)
{
	struct most_interface *iface = dev_get_drvdata(dev);

	switch (iface->interface) {
	case ITYPE_LOOPBACK:
		return snprintf(buf, PAGE_SIZE, "loopback\n");
	case ITYPE_I2C:
		return snprintf(buf, PAGE_SIZE, "i2c\n");
	case ITYPE_I2S:
		return snprintf(buf, PAGE_SIZE, "i2s\n");
	case ITYPE_TSI:
		return snprintf(buf, PAGE_SIZE, "tsi\n");
	case ITYPE_HBI:
		return snprintf(buf, PAGE_SIZE, "hbi\n");
	case ITYPE_MEDIALB_DIM:
		return snprintf(buf, PAGE_SIZE, "mlb_dim\n");
	case ITYPE_MEDIALB_DIM2:
		return snprintf(buf, PAGE_SIZE, "mlb_dim2\n");
	case ITYPE_USB:
		return snprintf(buf, PAGE_SIZE, "usb\n");
	case ITYPE_PCIE:
		return snprintf(buf, PAGE_SIZE, "pcie\n");
	}
	return snprintf(buf, PAGE_SIZE, "unknown\n");
}

static DEVICE_ATTR_RO(description);
static DEVICE_ATTR_RO(interface);

static struct attribute *interface_attrs[] = {
	DEV_ATTR(description),
	DEV_ATTR(interface),
	NULL,
};

static const struct attribute_group interface_attr_group = {
	.attrs = interface_attrs,
};

static const struct attribute_group *interface_attr_groups[] = {
	&interface_attr_group,
	NULL,
};

static struct most_component *match_component(char *name)
{
	struct most_component *comp;

	list_for_each_entry(comp, &comp_list, list) {
		if (!strcmp(comp->name, name))
			return comp;
	}
	return NULL;
}

struct show_links_data {
	int offs;
	char *buf;
};

static int print_links(struct device *dev, void *data)
{
	struct show_links_data *d = data;
	int offs = d->offs;
	char *buf = d->buf;
	struct most_channel *c;
	struct most_interface *iface = dev_get_drvdata(dev);

	list_for_each_entry(c, &iface->p->channel_list, list) {
		if (c->pipe0.comp) {
			offs += scnprintf(buf + offs,
					  PAGE_SIZE - offs,
					  "%s:%s:%s\n",
					  c->pipe0.comp->name,
					  dev_name(iface->dev),
					  dev_name(&c->dev));
		}
		if (c->pipe1.comp) {
			offs += scnprintf(buf + offs,
					  PAGE_SIZE - offs,
					  "%s:%s:%s\n",
					  c->pipe1.comp->name,
					  dev_name(iface->dev),
					  dev_name(&c->dev));
		}
	}
	d->offs = offs;
	return 0;
}

static int most_match(struct device *dev, const struct device_driver *drv)
{
	if (!strcmp(dev_name(dev), "most"))
		return 0;
	else
		return 1;
}

static const struct bus_type mostbus = {
	.name = "most",
	.match = most_match,
};

static ssize_t links_show(struct device_driver *drv, char *buf)
{
	struct show_links_data d = { .buf = buf };

	bus_for_each_dev(&mostbus, NULL, &d, print_links);
	return d.offs;
}

static ssize_t components_show(struct device_driver *drv, char *buf)
{
	struct most_component *comp;
	int offs = 0;

	list_for_each_entry(comp, &comp_list, list) {
		offs += scnprintf(buf + offs, PAGE_SIZE - offs, "%s\n",
				  comp->name);
	}
	return offs;
}

/**
 * get_channel - get pointer to channel
 * @mdev: name of the device interface
 * @mdev_ch: name of channel
 */
static struct most_channel *get_channel(char *mdev, char *mdev_ch)
{
	struct device *dev = NULL;
	struct most_interface *iface;
	struct most_channel *c, *tmp;

	dev = bus_find_device_by_name(&mostbus, NULL, mdev);
	if (!dev)
		return NULL;
	put_device(dev);
	iface = dev_get_drvdata(dev);
	list_for_each_entry_safe(c, tmp, &iface->p->channel_list, list) {
		if (!strcmp(dev_name(&c->dev), mdev_ch))
			return c;
	}
	return NULL;
}

static
inline int link_channel_to_component(struct most_channel *c,
				     struct most_component *comp,
				     char *name,
				     char *comp_param)
{
	int ret;
	struct most_component **comp_ptr;

	if (!c->pipe0.comp)
		comp_ptr = &c->pipe0.comp;
	else if (!c->pipe1.comp)
		comp_ptr = &c->pipe1.comp;
	else
		return -ENOSPC;

	*comp_ptr = comp;
	ret = comp->probe_channel(c->iface, c->channel_id, &c->cfg, name,
				  comp_param);
	if (ret) {
		*comp_ptr = NULL;
		return ret;
	}
	return 0;
}

int most_set_cfg_buffer_size(char *mdev, char *mdev_ch, u16 val)
{
	struct most_channel *c = get_channel(mdev, mdev_ch);

	if (!c)
		return -ENODEV;
	c->cfg.buffer_size = val;
	return 0;
}

int most_set_cfg_subbuffer_size(char *mdev, char *mdev_ch, u16 val)
{
	struct most_channel *c = get_channel(mdev, mdev_ch);

	if (!c)
		return -ENODEV;
	c->cfg.subbuffer_size = val;
	return 0;
}

int most_set_cfg_dbr_size(char *mdev, char *mdev_ch, u16 val)
{
	struct most_channel *c = get_channel(mdev, mdev_ch);

	if (!c)
		return -ENODEV;
	c->cfg.dbr_size = val;
	return 0;
}

int most_set_cfg_num_buffers(char *mdev, char *mdev_ch, u16 val)
{
	struct most_channel *c = get_channel(mdev, mdev_ch);

	if (!c)
		return -ENODEV;
	c->cfg.num_buffers = val;
	return 0;
}

int most_set_cfg_datatype(char *mdev, char *mdev_ch, char *buf)
{
	int i;
	struct most_channel *c = get_channel(mdev, mdev_ch);

	if (!c)
		return -ENODEV;
	for (i = 0; i < ARRAY_SIZE(ch_data_type); i++) {
		if (!strcmp(buf, ch_data_type[i].name)) {
			c->cfg.data_type = ch_data_type[i].most_ch_data_type;
			break;
		}
	}

	if (i == ARRAY_SIZE(ch_data_type))
		dev_warn(&c->dev, "Invalid attribute settings\n");
	return 0;
}

int most_set_cfg_direction(char *mdev, char *mdev_ch, char *buf)
{
	struct most_channel *c = get_channel(mdev, mdev_ch);

	if (!c)
		return -ENODEV;
	if (!strcmp(buf, "dir_rx")) {
		c->cfg.direction = MOST_CH_RX;
	} else if (!strcmp(buf, "rx")) {
		c->cfg.direction = MOST_CH_RX;
	} else if (!strcmp(buf, "dir_tx")) {
		c->cfg.direction = MOST_CH_TX;
	} else if (!strcmp(buf, "tx")) {
		c->cfg.direction = MOST_CH_TX;
	} else {
		dev_err(&c->dev, "Invalid direction\n");
		return -ENODATA;
	}
	return 0;
}

int most_set_cfg_packets_xact(char *mdev, char *mdev_ch, u16 val)
{
	struct most_channel *c = get_channel(mdev, mdev_ch);

	if (!c)
		return -ENODEV;
	c->cfg.packets_per_xact = val;
	return 0;
}

int most_cfg_complete(char *comp_name)
{
	struct most_component *comp;

	comp = match_component(comp_name);
	if (!comp)
		return -ENODEV;

	return comp->cfg_complete();
}

int most_add_link(char *mdev, char *mdev_ch, char *comp_name, char *link_name,
		  char *comp_param)
{
	struct most_channel *c = get_channel(mdev, mdev_ch);
	struct most_component *comp = match_component(comp_name);

	if (!c || !comp)
		return -ENODEV;

	return link_channel_to_component(c, comp, link_name, comp_param);
}

int most_remove_link(char *mdev, char *mdev_ch, char *comp_name)
{
	struct most_channel *c;
	struct most_component *comp;

	comp = match_component(comp_name);
	if (!comp)
		return -ENODEV;
	c = get_channel(mdev, mdev_ch);
	if (!c)
		return -ENODEV;

	if (comp->disconnect_channel(c->iface, c->channel_id))
		return -EIO;
	if (c->pipe0.comp == comp)
		c->pipe0.comp = NULL;
	if (c->pipe1.comp == comp)
		c->pipe1.comp = NULL;
	return 0;
}

#define DRV_ATTR(_name) (&driver_attr_##_name.attr)

static DRIVER_ATTR_RO(links);
static DRIVER_ATTR_RO(components);

static struct attribute *mc_attrs[] = {
	DRV_ATTR(links),
	DRV_ATTR(components),
	NULL,
};

static const struct attribute_group mc_attr_group = {
	.attrs = mc_attrs,
};

static const struct attribute_group *mc_attr_groups[] = {
	&mc_attr_group,
	NULL,
};

static struct device_driver mostbus_driver = {
	.name = "most_core",
	.bus = &mostbus,
	.groups = mc_attr_groups,
};

static inline void trash_mbo(struct mbo *mbo)
{
	unsigned long flags;
	struct most_channel *c = mbo->context;

	spin_lock_irqsave(&c->fifo_lock, flags);
	list_add(&mbo->list, &c->trash_fifo);
	spin_unlock_irqrestore(&c->fifo_lock, flags);
}

static bool hdm_mbo_ready(struct most_channel *c)
{
	bool empty;

	if (c->enqueue_halt)
		return false;

	spin_lock_irq(&c->fifo_lock);
	empty = list_empty(&c->halt_fifo);
	spin_unlock_irq(&c->fifo_lock);

	return !empty;
}

static void nq_hdm_mbo(struct mbo *mbo)
{
	unsigned long flags;
	struct most_channel *c = mbo->context;

	spin_lock_irqsave(&c->fifo_lock, flags);
	list_add_tail(&mbo->list, &c->halt_fifo);
	spin_unlock_irqrestore(&c->fifo_lock, flags);
	wake_up_interruptible(&c->hdm_fifo_wq);
}

static int hdm_enqueue_thread(void *data)
{
	struct most_channel *c = data;
	struct mbo *mbo;
	int ret;
	typeof(c->iface->enqueue) enqueue = c->iface->enqueue;

	while (likely(!kthread_should_stop())) {
		wait_event_interruptible(c->hdm_fifo_wq,
					 hdm_mbo_ready(c) ||
					 kthread_should_stop());

		mutex_lock(&c->nq_mutex);
		spin_lock_irq(&c->fifo_lock);
		if (unlikely(c->enqueue_halt || list_empty(&c->halt_fifo))) {
			spin_unlock_irq(&c->fifo_lock);
			mutex_unlock(&c->nq_mutex);
			continue;
		}

		mbo = list_pop_mbo(&c->halt_fifo);
		spin_unlock_irq(&c->fifo_lock);

		if (c->cfg.direction == MOST_CH_RX)
			mbo->buffer_length = c->cfg.buffer_size;

		ret = enqueue(mbo->ifp, mbo->hdm_channel_id, mbo);
		mutex_unlock(&c->nq_mutex);

		if (unlikely(ret)) {
			dev_err(&c->dev, "Buffer enqueue failed\n");
			nq_hdm_mbo(mbo);
			c->hdm_enqueue_task = NULL;
			return 0;
		}
	}

	return 0;
}

static int run_enqueue_thread(struct most_channel *c, int channel_id)
{
	struct task_struct *task =
		kthread_run(hdm_enqueue_thread, c, "hdm_fifo_%d",
			    channel_id);

	if (IS_ERR(task))
		return PTR_ERR(task);

	c->hdm_enqueue_task = task;
	return 0;
}

/**
 * arm_mbo - recycle MBO for further usage
 * @mbo: most buffer
 *
 * This puts an MBO back to the list to have it ready for upcoming
 * tx transactions.
 *
 * In case the MBO belongs to a channel that recently has been
 * poisoned, the MBO is scheduled to be trashed.
 * Calls the completion handler of an attached component.
 */
static void arm_mbo(struct mbo *mbo)
{
	unsigned long flags;
	struct most_channel *c;

	c = mbo->context;

	if (c->is_poisoned) {
		trash_mbo(mbo);
		return;
	}

	spin_lock_irqsave(&c->fifo_lock, flags);
	++*mbo->num_buffers_ptr;
	list_add_tail(&mbo->list, &c->fifo);
	spin_unlock_irqrestore(&c->fifo_lock, flags);

	if (c->pipe0.refs && c->pipe0.comp->tx_completion)
		c->pipe0.comp->tx_completion(c->iface, c->channel_id);

	if (c->pipe1.refs && c->pipe1.comp->tx_completion)
		c->pipe1.comp->tx_completion(c->iface, c->channel_id);
}

/**
 * arm_mbo_chain - helper function that arms an MBO chain for the HDM
 * @c: pointer to interface channel
 * @dir: direction of the channel
 * @compl: pointer to completion function
 *
 * This allocates buffer objects including the containing DMA coherent
 * buffer and puts them in the fifo.
 * Buffers of Rx channels are put in the kthread fifo, hence immediately
 * submitted to the HDM.
 *
 * Returns the number of allocated and enqueued MBOs.
 */
static int arm_mbo_chain(struct most_channel *c, int dir,
			 void (*compl)(struct mbo *))
{
	unsigned int i;
	struct mbo *mbo;
	unsigned long flags;
	u32 coherent_buf_size = c->cfg.buffer_size + c->cfg.extra_len;

	atomic_set(&c->mbo_nq_level, 0);

	for (i = 0; i < c->cfg.num_buffers; i++) {
		mbo = kzalloc(sizeof(*mbo), GFP_KERNEL);
		if (!mbo)
			goto flush_fifos;

		mbo->context = c;
		mbo->ifp = c->iface;
		mbo->hdm_channel_id = c->channel_id;
		if (c->iface->dma_alloc) {
			mbo->virt_address =
				c->iface->dma_alloc(mbo, coherent_buf_size);
		} else {
			mbo->virt_address =
				kzalloc(coherent_buf_size, GFP_KERNEL);
		}
		if (!mbo->virt_address)
			goto release_mbo;

		mbo->complete = compl;
		mbo->num_buffers_ptr = &dummy_num_buffers;
		if (dir == MOST_CH_RX) {
			nq_hdm_mbo(mbo);
			atomic_inc(&c->mbo_nq_level);
		} else {
			spin_lock_irqsave(&c->fifo_lock, flags);
			list_add_tail(&mbo->list, &c->fifo);
			spin_unlock_irqrestore(&c->fifo_lock, flags);
		}
	}
	return c->cfg.num_buffers;

release_mbo:
	kfree(mbo);

flush_fifos:
	flush_channel_fifos(c);
	return 0;
}

/**
 * most_submit_mbo - submits an MBO to fifo
 * @mbo: most buffer
 */
void most_submit_mbo(struct mbo *mbo)
{
	if (WARN_ONCE(!mbo || !mbo->context,
		      "Bad buffer or missing channel reference\n"))
		return;

	nq_hdm_mbo(mbo);
}
EXPORT_SYMBOL_GPL(most_submit_mbo);

/**
 * most_write_completion - write completion handler
 * @mbo: most buffer
 *
 * This recycles the MBO for further usage. In case the channel has been
 * poisoned, the MBO is scheduled to be trashed.
 */
static void most_write_completion(struct mbo *mbo)
{
	struct most_channel *c;

	c = mbo->context;
	if (unlikely(c->is_poisoned || (mbo->status == MBO_E_CLOSE)))
		trash_mbo(mbo);
	else
		arm_mbo(mbo);
}

int channel_has_mbo(struct most_interface *iface, int id,
		    struct most_component *comp)
{
	struct most_channel *c = iface->p->channel[id];
	unsigned long flags;
	int empty;

	if (unlikely(!c))
		return -EINVAL;

	if (c->pipe0.refs && c->pipe1.refs &&
	    ((comp == c->pipe0.comp && c->pipe0.num_buffers <= 0) ||
	     (comp == c->pipe1.comp && c->pipe1.num_buffers <= 0)))
		return 0;

	spin_lock_irqsave(&c->fifo_lock, flags);
	empty = list_empty(&c->fifo);
	spin_unlock_irqrestore(&c->fifo_lock, flags);
	return !empty;
}
EXPORT_SYMBOL_GPL(channel_has_mbo);

/**
 * most_get_mbo - get pointer to an MBO of pool
 * @iface: pointer to interface instance
 * @id: channel ID
 * @comp: driver component
 *
 * This attempts to get a free buffer out of the channel fifo.
 * Returns a pointer to MBO on success or NULL otherwise.
 */
struct mbo *most_get_mbo(struct most_interface *iface, int id,
			 struct most_component *comp)
{
	struct mbo *mbo;
	struct most_channel *c;
	unsigned long flags;
	int *num_buffers_ptr;

	c = iface->p->channel[id];
	if (unlikely(!c))
		return NULL;

	if (c->pipe0.refs && c->pipe1.refs &&
	    ((comp == c->pipe0.comp && c->pipe0.num_buffers <= 0) ||
	     (comp == c->pipe1.comp && c->pipe1.num_buffers <= 0)))
		return NULL;

	if (comp == c->pipe0.comp)
		num_buffers_ptr = &c->pipe0.num_buffers;
	else if (comp == c->pipe1.comp)
		num_buffers_ptr = &c->pipe1.num_buffers;
	else
		num_buffers_ptr = &dummy_num_buffers;

	spin_lock_irqsave(&c->fifo_lock, flags);
	if (list_empty(&c->fifo)) {
		spin_unlock_irqrestore(&c->fifo_lock, flags);
		return NULL;
	}
	mbo = list_pop_mbo(&c->fifo);
	--*num_buffers_ptr;
	spin_unlock_irqrestore(&c->fifo_lock, flags);

	mbo->num_buffers_ptr = num_buffers_ptr;
	mbo->buffer_length = c->cfg.buffer_size;
	return mbo;
}
EXPORT_SYMBOL_GPL(most_get_mbo);
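
/*
 * Usage note (illustrative sketch, not part of this file): a component's
 * transmit path is expected to obtain a free buffer with most_get_mbo(),
 * fill it and hand it back to the core with most_submit_mbo(). The
 * identifiers my_comp, iface, id, data and len below are placeholders.
 *
 *	struct mbo *mbo = most_get_mbo(iface, id, &my_comp);
 *
 *	if (mbo) {
 *		memcpy(mbo->virt_address, data, len);
 *		mbo->buffer_length = len;
 *		most_submit_mbo(mbo);
 *	}
 */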

/**
 * most_put_mbo - return buffer to pool
 * @mbo: most buffer
 */
void most_put_mbo(struct mbo *mbo)
{
	struct most_channel *c = mbo->context;

	if (c->cfg.direction == MOST_CH_TX) {
		arm_mbo(mbo);
		return;
	}
	nq_hdm_mbo(mbo);
	atomic_inc(&c->mbo_nq_level);
}
EXPORT_SYMBOL_GPL(most_put_mbo);

/**
 * most_read_completion - read completion handler
 * @mbo: most buffer
 *
 * This function is called by the HDM when data has been received from the
 * hardware and copied to the buffer of the MBO.
 *
 * In case the channel has been poisoned it puts the buffer in the trash queue.
 * Otherwise, it passes the buffer to a component for further processing.
 */
static void most_read_completion(struct mbo *mbo)
{
	struct most_channel *c = mbo->context;

	if (unlikely(c->is_poisoned || (mbo->status == MBO_E_CLOSE))) {
		trash_mbo(mbo);
		return;
	}

	if (mbo->status == MBO_E_INVAL) {
		nq_hdm_mbo(mbo);
		atomic_inc(&c->mbo_nq_level);
		return;
	}

	if (atomic_sub_and_test(1, &c->mbo_nq_level))
		c->is_starving = 1;

	if (c->pipe0.refs && c->pipe0.comp->rx_completion &&
	    c->pipe0.comp->rx_completion(mbo) == 0)
		return;

	if (c->pipe1.refs && c->pipe1.comp->rx_completion &&
	    c->pipe1.comp->rx_completion(mbo) == 0)
		return;

	most_put_mbo(mbo);
}

/**
 * most_start_channel - prepares a channel for communication
 * @iface: pointer to interface instance
 * @id: channel ID
 * @comp: driver component
 *
 * This prepares the channel for usage. Cross-checks whether the
 * channel's been properly configured.
 *
 * Returns 0 on success or error code otherwise.
 */
int most_start_channel(struct most_interface *iface, int id,
		       struct most_component *comp)
{
	int num_buffer;
	int ret;
	struct most_channel *c = iface->p->channel[id];

	if (unlikely(!c))
		return -EINVAL;

	mutex_lock(&c->start_mutex);
	if (c->pipe0.refs + c->pipe1.refs > 0)
		goto out; /* already started by another component */

	if (!try_module_get(iface->mod)) {
		dev_err(&c->dev, "Failed to acquire HDM lock\n");
		mutex_unlock(&c->start_mutex);
		return -ENOLCK;
	}

	c->cfg.extra_len = 0;
	if (c->iface->configure(c->iface, c->channel_id, &c->cfg)) {
		dev_err(&c->dev, "Channel configuration failed. Go check settings...\n");
		ret = -EINVAL;
		goto err_put_module;
	}

	init_waitqueue_head(&c->hdm_fifo_wq);

	if (c->cfg.direction == MOST_CH_RX)
		num_buffer = arm_mbo_chain(c, c->cfg.direction,
					   most_read_completion);
	else
		num_buffer = arm_mbo_chain(c, c->cfg.direction,
					   most_write_completion);
	if (unlikely(!num_buffer)) {
		ret = -ENOMEM;
		goto err_put_module;
	}

	ret = run_enqueue_thread(c, id);
	if (ret)
		goto err_put_module;

	c->is_starving = 0;
	c->pipe0.num_buffers = c->cfg.num_buffers / 2;
	c->pipe1.num_buffers = c->cfg.num_buffers - c->pipe0.num_buffers;
	atomic_set(&c->mbo_ref, num_buffer);

out:
	if (comp == c->pipe0.comp)
		c->pipe0.refs++;
	if (comp == c->pipe1.comp)
		c->pipe1.refs++;
	mutex_unlock(&c->start_mutex);
	return 0;

err_put_module:
	module_put(iface->mod);
	mutex_unlock(&c->start_mutex);
	return ret;
}
EXPORT_SYMBOL_GPL(most_start_channel);
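
/*
 * Usage note (illustrative sketch, not part of this file): components
 * typically start a channel when the first user shows up and stop it
 * again when the last user goes away, e.g. from their open()/release()
 * paths. The identifiers my_comp, iface and id below are placeholders.
 *
 *	if (most_start_channel(iface, id, &my_comp))
 *		return -ENODEV;
 *	...
 *	most_stop_channel(iface, id, &my_comp);
 */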

/**
 * most_stop_channel - stops a running channel
 * @iface: pointer to interface instance
 * @id: channel ID
 * @comp: driver component
 */
int most_stop_channel(struct most_interface *iface, int id,
		      struct most_component *comp)
{
	struct most_channel *c;

	if (unlikely((!iface) || (id >= iface->num_channels) || (id < 0))) {
		pr_err("Bad interface or index out of range\n");
		return -EINVAL;
	}
	c = iface->p->channel[id];
	if (unlikely(!c))
		return -EINVAL;

	mutex_lock(&c->start_mutex);
	if (c->pipe0.refs + c->pipe1.refs >= 2)
		goto out;

	if (c->hdm_enqueue_task)
		kthread_stop(c->hdm_enqueue_task);
	c->hdm_enqueue_task = NULL;

	if (iface->mod)
		module_put(iface->mod);

	c->is_poisoned = true;
	if (c->iface->poison_channel(c->iface, c->channel_id)) {
		dev_err(&c->dev, "Failed to stop channel %d of interface %s\n", c->channel_id,
			c->iface->description);
		mutex_unlock(&c->start_mutex);
		return -EAGAIN;
	}
	flush_trash_fifo(c);
	flush_channel_fifos(c);

#ifdef CMPL_INTERRUPTIBLE
	if (wait_for_completion_interruptible(&c->cleanup)) {
		dev_err(&c->dev, "Interrupted while cleaning up channel %d\n", c->channel_id);
		mutex_unlock(&c->start_mutex);
		return -EINTR;
	}
#else
	wait_for_completion(&c->cleanup);
#endif
	c->is_poisoned = false;

out:
	if (comp == c->pipe0.comp)
		c->pipe0.refs--;
	if (comp == c->pipe1.comp)
		c->pipe1.refs--;
	mutex_unlock(&c->start_mutex);
	return 0;
}
EXPORT_SYMBOL_GPL(most_stop_channel);

/**
 * most_register_component - registers a driver component with the core
 * @comp: driver component
 */
int most_register_component(struct most_component *comp)
{
	if (!comp) {
		pr_err("Bad component\n");
		return -EINVAL;
	}
	list_add_tail(&comp->list, &comp_list);
	return 0;
}
EXPORT_SYMBOL_GPL(most_register_component);
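
/*
 * Usage note (illustrative sketch, not part of this file): a component
 * fills in a struct most_component with the callbacks the core invokes
 * (probe_channel, disconnect_channel and the rx/tx completion handlers
 * used above) and registers it from its module init code. All identifiers
 * prefixed with my_ are placeholders.
 *
 *	static struct most_component my_comp = {
 *		.name = "my_comp",
 *		.probe_channel = my_probe_channel,
 *		.disconnect_channel = my_disconnect_channel,
 *		.rx_completion = my_rx_completion,
 *		.tx_completion = my_tx_completion,
 *	};
 *
 *	static int __init my_comp_init(void)
 *	{
 *		return most_register_component(&my_comp);
 *	}
 *
 *	static void __exit my_comp_exit(void)
 *	{
 *		most_deregister_component(&my_comp);
 *	}
 */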

static int disconnect_channels(struct device *dev, void *data)
{
	struct most_interface *iface;
	struct most_channel *c, *tmp;
	struct most_component *comp = data;

	iface = dev_get_drvdata(dev);
	list_for_each_entry_safe(c, tmp, &iface->p->channel_list, list) {
		if (c->pipe0.comp == comp || c->pipe1.comp == comp)
			comp->disconnect_channel(c->iface, c->channel_id);
		if (c->pipe0.comp == comp)
			c->pipe0.comp = NULL;
		if (c->pipe1.comp == comp)
			c->pipe1.comp = NULL;
	}
	return 0;
}

/**
 * most_deregister_component - deregisters a driver component with the core
 * @comp: driver component
 */
int most_deregister_component(struct most_component *comp)
{
	if (!comp) {
		pr_err("Bad component\n");
		return -EINVAL;
	}

	bus_for_each_dev(&mostbus, NULL, comp, disconnect_channels);
	list_del(&comp->list);
	return 0;
}
EXPORT_SYMBOL_GPL(most_deregister_component);

static void release_channel(struct device *dev)
{
	struct most_channel *c = to_channel(dev);

	kfree(c);
}

/**
 * most_register_interface - registers an interface with core
 * @iface: device interface
 *
 * Allocates and initializes a new interface instance and all of its channels.
 * Returns 0 on success or a negative error code otherwise.
 */
int most_register_interface(struct most_interface *iface)
{
	unsigned int i;
	int id;
	struct most_channel *c;

	if (!iface || !iface->enqueue || !iface->configure ||
	    !iface->poison_channel || (iface->num_channels > MAX_CHANNELS))
		return -EINVAL;

	id = ida_alloc(&mdev_id, GFP_KERNEL);
	if (id < 0) {
		dev_err(iface->dev, "Failed to allocate device ID\n");
		return id;
	}

	iface->p = kzalloc(sizeof(*iface->p), GFP_KERNEL);
	if (!iface->p) {
		ida_free(&mdev_id, id);
		return -ENOMEM;
	}

	INIT_LIST_HEAD(&iface->p->channel_list);
	iface->p->dev_id = id;
	strscpy(iface->p->name, iface->description, sizeof(iface->p->name));
	iface->dev->bus = &mostbus;
	iface->dev->groups = interface_attr_groups;
	dev_set_drvdata(iface->dev, iface);
	if (device_register(iface->dev)) {
		dev_err(iface->dev, "Failed to register interface device\n");
		kfree(iface->p);
		put_device(iface->dev);
		ida_free(&mdev_id, id);
		return -ENOMEM;
	}

	for (i = 0; i < iface->num_channels; i++) {
		const char *name_suffix = iface->channel_vector[i].name_suffix;

		c = kzalloc(sizeof(*c), GFP_KERNEL);
		if (!c)
			goto err_free_resources;
		if (!name_suffix)
			snprintf(c->name, STRING_SIZE, "ch%d", i);
		else
			snprintf(c->name, STRING_SIZE, "%s", name_suffix);
		c->dev.init_name = c->name;
		c->dev.parent = iface->dev;
		c->dev.groups = channel_attr_groups;
		c->dev.release = release_channel;
		iface->p->channel[i] = c;
		c->is_starving = 0;
		c->iface = iface;
		c->channel_id = i;
		c->keep_mbo = false;
		c->enqueue_halt = false;
		c->is_poisoned = false;
		c->cfg.direction = 0;
		c->cfg.data_type = 0;
		c->cfg.num_buffers = 0;
		c->cfg.buffer_size = 0;
		c->cfg.subbuffer_size = 0;
		c->cfg.packets_per_xact = 0;
		spin_lock_init(&c->fifo_lock);
		INIT_LIST_HEAD(&c->fifo);
		INIT_LIST_HEAD(&c->trash_fifo);
		INIT_LIST_HEAD(&c->halt_fifo);
		init_completion(&c->cleanup);
		atomic_set(&c->mbo_ref, 0);
		mutex_init(&c->start_mutex);
		mutex_init(&c->nq_mutex);
		list_add_tail(&c->list, &iface->p->channel_list);
		if (device_register(&c->dev)) {
			dev_err(&c->dev, "Failed to register channel device\n");
			goto err_free_most_channel;
		}
	}
	most_interface_register_notify(iface->description);
	return 0;

err_free_most_channel:
	put_device(&c->dev);

err_free_resources:
	while (i > 0) {
		c = iface->p->channel[--i];
		device_unregister(&c->dev);
	}
	kfree(iface->p);
	device_unregister(iface->dev);
	ida_free(&mdev_id, id);
	return -ENOMEM;
}
EXPORT_SYMBOL_GPL(most_register_interface);
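
/*
 * Usage note (illustrative sketch, not part of this file): a hardware
 * dependent module (HDM) describes its channels in a channel_vector,
 * provides the configure/enqueue/poison_channel callbacks checked above
 * and then registers the interface. All identifiers prefixed with my_
 * are placeholders.
 *
 *	my_iface.interface = ITYPE_USB;
 *	my_iface.description = "my-hdm";
 *	my_iface.num_channels = MY_NUM_CHANNELS;
 *	my_iface.channel_vector = my_channel_caps;
 *	my_iface.configure = my_configure;
 *	my_iface.enqueue = my_enqueue;
 *	my_iface.poison_channel = my_poison_channel;
 *	my_iface.dev = &my_most_dev;	(registered by the core via device_register())
 *	my_iface.mod = THIS_MODULE;
 *
 *	ret = most_register_interface(&my_iface);
 */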

/**
 * most_deregister_interface - deregisters an interface with core
 * @iface: device interface
 *
 * Before removing an interface instance from the list, all running
 * channels are stopped and poisoned.
 */
void most_deregister_interface(struct most_interface *iface)
{
	int i;
	struct most_channel *c;

	for (i = 0; i < iface->num_channels; i++) {
		c = iface->p->channel[i];
		if (c->pipe0.comp)
			c->pipe0.comp->disconnect_channel(c->iface,
							  c->channel_id);
		if (c->pipe1.comp)
			c->pipe1.comp->disconnect_channel(c->iface,
							  c->channel_id);
		c->pipe0.comp = NULL;
		c->pipe1.comp = NULL;
		list_del(&c->list);
		device_unregister(&c->dev);
	}

	ida_free(&mdev_id, iface->p->dev_id);
	kfree(iface->p);
	device_unregister(iface->dev);
}
EXPORT_SYMBOL_GPL(most_deregister_interface);

/**
 * most_stop_enqueue - prevents core from enqueueing MBOs
 * @iface: pointer to interface
 * @id: channel id
 *
 * This is called by an HDM that _cannot_ attend to its duties and
 * is about to be overrun by the core. The core is not going to
 * enqueue any further packets unless the flagging HDM calls
 * most_resume_enqueue().
 */
void most_stop_enqueue(struct most_interface *iface, int id)
{
	struct most_channel *c = iface->p->channel[id];

	if (!c)
		return;

	mutex_lock(&c->nq_mutex);
	c->enqueue_halt = true;
	mutex_unlock(&c->nq_mutex);
}
EXPORT_SYMBOL_GPL(most_stop_enqueue);

/**
 * most_resume_enqueue - allow core to enqueue MBOs again
 * @iface: pointer to interface
 * @id: channel id
 *
 * This clears the enqueue halt flag and enqueues all MBOs currently
 * sitting in the wait fifo.
 */
void most_resume_enqueue(struct most_interface *iface, int id)
{
	struct most_channel *c = iface->p->channel[id];

	if (!c)
		return;

	mutex_lock(&c->nq_mutex);
	c->enqueue_halt = false;
	mutex_unlock(&c->nq_mutex);

	wake_up_interruptible(&c->hdm_fifo_wq);
}
EXPORT_SYMBOL_GPL(most_resume_enqueue);
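
/*
 * Usage note (illustrative sketch, not part of this file): an HDM whose
 * hardware queue is temporarily full can fence off further enqueue calls
 * and re-enable them once there is room again. iface and id are
 * placeholders.
 *
 *	most_stop_enqueue(iface, id);	(hardware cannot take more buffers)
 *	...
 *	most_resume_enqueue(iface, id);	(halt flag cleared, enqueue thread woken)
 */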

static int __init most_init(void)
{
	int err;

	INIT_LIST_HEAD(&comp_list);
	ida_init(&mdev_id);

	err = bus_register(&mostbus);
	if (err) {
		pr_err("Failed to register most bus\n");
		return err;
	}
	err = driver_register(&mostbus_driver);
	if (err) {
		pr_err("Failed to register core driver\n");
		goto err_unregister_bus;
	}
	configfs_init();
	return 0;

err_unregister_bus:
	bus_unregister(&mostbus);
	return err;
}

static void __exit most_exit(void)
{
	driver_unregister(&mostbus_driver);
	bus_unregister(&mostbus);
	ida_destroy(&mdev_id);
}

subsys_initcall(most_init);
module_exit(most_exit);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Christian Gromm <[email protected]>");
MODULE_DESCRIPTION("Core module of stacked MOST Linux driver");