1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * ISM driver for s390.
4  *
5  * Copyright IBM Corp. 2018
6  */
7 #define KMSG_COMPONENT "ism"
8 #define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
9
10 #include <linux/module.h>
11 #include <linux/types.h>
12 #include <linux/interrupt.h>
13 #include <linux/device.h>
14 #include <linux/err.h>
15 #include <linux/ctype.h>
16 #include <linux/processor.h>
17
18 #include "ism.h"
19
20 MODULE_DESCRIPTION("ISM driver for s390");
21 MODULE_LICENSE("GPL");
22
23 #define PCI_DEVICE_ID_IBM_ISM 0x04ED
24 #define DRV_NAME "ism"
25
26 static const struct pci_device_id ism_device_table[] = {
27         { PCI_VDEVICE(IBM, PCI_DEVICE_ID_IBM_ISM), 0 },
28         { 0, }
29 };
30 MODULE_DEVICE_TABLE(pci, ism_device_table);
31
32 static debug_info_t *ism_debug_info;
33 static const struct smcd_ops ism_ops;
34
35 #define NO_CLIENT               0xff            /* must be >= MAX_CLIENTS */
36 static struct ism_client *clients[MAX_CLIENTS]; /* use an array rather than */
37                                                 /* a list for fast mapping  */
38 static u8 max_client;
39 static DEFINE_SPINLOCK(clients_lock);
40 struct ism_dev_list {
41         struct list_head list;
42         struct mutex mutex; /* protects ism device list */
43 };
44
45 static struct ism_dev_list ism_dev_list = {
46         .list = LIST_HEAD_INIT(ism_dev_list.list),
47         .mutex = __MUTEX_INITIALIZER(ism_dev_list.mutex),
48 };
49
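/*
 * Register an ISM client with the driver.  A free slot in the clients[]
 * array becomes the client's id; max_client tracks the highest id in use
 * so event and interrupt fan-out only scans occupied slots.  Every ISM
 * device already known to the driver is announced to the new client via
 * its add() callback.  Returns -ENOSPC once all MAX_CLIENTS slots are
 * taken.
 */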
50 int ism_register_client(struct ism_client *client)
51 {
52         struct ism_dev *ism;
53         unsigned long flags;
54         int i, rc = -ENOSPC;
55
56         mutex_lock(&ism_dev_list.mutex);
57         spin_lock_irqsave(&clients_lock, flags);
58         for (i = 0; i < MAX_CLIENTS; ++i) {
59                 if (!clients[i]) {
60                         clients[i] = client;
61                         client->id = i;
62                         if (i == max_client)
63                                 max_client++;
64                         rc = 0;
65                         break;
66                 }
67         }
68         spin_unlock_irqrestore(&clients_lock, flags);
69         if (i < MAX_CLIENTS) {
70                 /* initialize with all devices that we got so far */
71                 list_for_each_entry(ism, &ism_dev_list.list, list) {
72                         ism->priv[i] = NULL;
73                         client->add(ism);
74                 }
75         }
76         mutex_unlock(&ism_dev_list.mutex);
77
78         return rc;
79 }
80 EXPORT_SYMBOL_GPL(ism_register_client);
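/*
 * Illustrative sketch only (not part of this driver): a client module
 * would typically provide its callbacks in a static struct ism_client
 * and register it once during initialization.  The my_* names below are
 * made up for the example.
 *
 *	static struct ism_client my_client = {
 *		.name		= "my_client",
 *		.add		= my_add_dev,
 *		.remove		= my_remove_dev,
 *		.handle_event	= my_handle_event,
 *		.handle_irq	= my_handle_irq,
 *	};
 *
 *	err = ism_register_client(&my_client);
 */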
81
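/*
 * Remove a client from the clients[] array.  Unregistering is refused
 * with -EBUSY while the client still owns registered DMBs on any device,
 * because sba_client_arr[] entries would otherwise point at a stale
 * client id.
 */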
82 int ism_unregister_client(struct ism_client *client)
83 {
84         struct ism_dev *ism;
85         unsigned long flags;
86         int rc = 0;
87
88         mutex_lock(&ism_dev_list.mutex);
89         spin_lock_irqsave(&clients_lock, flags);
90         clients[client->id] = NULL;
91         if (client->id + 1 == max_client)
92                 max_client--;
93         spin_unlock_irqrestore(&clients_lock, flags);
94         list_for_each_entry(ism, &ism_dev_list.list, list) {
95                 for (int i = 0; i < ISM_NR_DMBS; ++i) {
96                         if (ism->sba_client_arr[i] == client->id) {
97                                 pr_err("%s: attempt to unregister client '%s' "
98                                        "with registered dmb(s)\n", __func__,
99                                        client->name);
100                                 rc = -EBUSY;
101                                 goto out;
102                         }
103                 }
104         }
105 out:
106         mutex_unlock(&ism_dev_list.mutex);
107
108         return rc;
109 }
110 EXPORT_SYMBOL_GPL(ism_unregister_client);
111
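/*
 * Issue a command to the ISM device: copy the request payload and then
 * the request header into the device command area, preset the response
 * code to ISM_ERROR, and read back the response header (plus the payload
 * on success).  Returns the response code reported by the device.
 */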
112 static int ism_cmd(struct ism_dev *ism, void *cmd)
113 {
114         struct ism_req_hdr *req = cmd;
115         struct ism_resp_hdr *resp = cmd;
116
117         __ism_write_cmd(ism, req + 1, sizeof(*req), req->len - sizeof(*req));
118         __ism_write_cmd(ism, req, 0, sizeof(*req));
119
120         WRITE_ONCE(resp->ret, ISM_ERROR);
121
122         __ism_read_cmd(ism, resp, 0, sizeof(*resp));
123         if (resp->ret) {
124                 debug_text_event(ism_debug_info, 0, "cmd failure");
125                 debug_event(ism_debug_info, 0, resp, sizeof(*resp));
126                 goto out;
127         }
128         __ism_read_cmd(ism, resp + 1, sizeof(*resp), resp->len - sizeof(*resp));
129 out:
130         return resp->ret;
131 }
132
133 static int ism_cmd_simple(struct ism_dev *ism, u32 cmd_code)
134 {
135         union ism_cmd_simple cmd;
136
137         memset(&cmd, 0, sizeof(cmd));
138         cmd.request.hdr.cmd = cmd_code;
139         cmd.request.hdr.len = sizeof(cmd.request);
140
141         return ism_cmd(ism, &cmd);
142 }
143
144 static int query_info(struct ism_dev *ism)
145 {
146         union ism_qi cmd;
147
148         memset(&cmd, 0, sizeof(cmd));
149         cmd.request.hdr.cmd = ISM_QUERY_INFO;
150         cmd.request.hdr.len = sizeof(cmd.request);
151
152         if (ism_cmd(ism, &cmd))
153                 goto out;
154
155         debug_text_event(ism_debug_info, 3, "query info");
156         debug_event(ism_debug_info, 3, &cmd.response, sizeof(cmd.response));
157 out:
158         return 0;
159 }
160
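/*
 * Allocate one DMA-coherent page for the System Buffer Area (SBA) and
 * register it with the device; the SBA is where the device posts the
 * per-DMB notification bits that ism_handle_irq() scans.
 */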
161 static int register_sba(struct ism_dev *ism)
162 {
163         union ism_reg_sba cmd;
164         dma_addr_t dma_handle;
165         struct ism_sba *sba;
166
167         sba = dma_alloc_coherent(&ism->pdev->dev, PAGE_SIZE, &dma_handle,
168                                  GFP_KERNEL);
169         if (!sba)
170                 return -ENOMEM;
171
172         memset(&cmd, 0, sizeof(cmd));
173         cmd.request.hdr.cmd = ISM_REG_SBA;
174         cmd.request.hdr.len = sizeof(cmd.request);
175         cmd.request.sba = dma_handle;
176
177         if (ism_cmd(ism, &cmd)) {
178                 dma_free_coherent(&ism->pdev->dev, PAGE_SIZE, sba, dma_handle);
179                 return -EIO;
180         }
181
182         ism->sba = sba;
183         ism->sba_dma_addr = dma_handle;
184
185         return 0;
186 }
187
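/*
 * Allocate and register the Information Event Queue (IEQ).  Device
 * events are delivered into this page and drained by ism_handle_event().
 */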
188 static int register_ieq(struct ism_dev *ism)
189 {
190         union ism_reg_ieq cmd;
191         dma_addr_t dma_handle;
192         struct ism_eq *ieq;
193
194         ieq = dma_alloc_coherent(&ism->pdev->dev, PAGE_SIZE, &dma_handle,
195                                  GFP_KERNEL);
196         if (!ieq)
197                 return -ENOMEM;
198
199         memset(&cmd, 0, sizeof(cmd));
200         cmd.request.hdr.cmd = ISM_REG_IEQ;
201         cmd.request.hdr.len = sizeof(cmd.request);
202         cmd.request.ieq = dma_handle;
203         cmd.request.len = sizeof(*ieq);
204
205         if (ism_cmd(ism, &cmd)) {
206                 dma_free_coherent(&ism->pdev->dev, PAGE_SIZE, ieq, dma_handle);
207                 return -EIO;
208         }
209
210         ism->ieq = ieq;
211         ism->ieq_idx = -1;
212         ism->ieq_dma_addr = dma_handle;
213
214         return 0;
215 }
216
217 static int unregister_sba(struct ism_dev *ism)
218 {
219         int ret;
220
221         if (!ism->sba)
222                 return 0;
223
224         ret = ism_cmd_simple(ism, ISM_UNREG_SBA);
225         if (ret && ret != ISM_ERROR)
226                 return -EIO;
227
228         dma_free_coherent(&ism->pdev->dev, PAGE_SIZE,
229                           ism->sba, ism->sba_dma_addr);
230
231         ism->sba = NULL;
232         ism->sba_dma_addr = 0;
233
234         return 0;
235 }
236
237 static int unregister_ieq(struct ism_dev *ism)
238 {
239         int ret;
240
241         if (!ism->ieq)
242                 return 0;
243
244         ret = ism_cmd_simple(ism, ISM_UNREG_IEQ);
245         if (ret && ret != ISM_ERROR)
246                 return -EIO;
247
248         dma_free_coherent(&ism->pdev->dev, PAGE_SIZE,
249                           ism->ieq, ism->ieq_dma_addr);
250
251         ism->ieq = NULL;
252         ism->ieq_dma_addr = 0;
253
254         return 0;
255 }
256
257 static int ism_read_local_gid(struct ism_dev *ism)
258 {
259         union ism_read_gid cmd;
260         int ret;
261
262         memset(&cmd, 0, sizeof(cmd));
263         cmd.request.hdr.cmd = ISM_READ_GID;
264         cmd.request.hdr.len = sizeof(cmd.request);
265
266         ret = ism_cmd(ism, &cmd);
267         if (ret)
268                 goto out;
269
270         ism->local_gid = cmd.response.gid;
271 out:
272         return ret;
273 }
274
275 static int ism_query_rgid(struct ism_dev *ism, u64 rgid, u32 vid_valid,
276                           u32 vid)
277 {
278         union ism_query_rgid cmd;
279
280         memset(&cmd, 0, sizeof(cmd));
281         cmd.request.hdr.cmd = ISM_QUERY_RGID;
282         cmd.request.hdr.len = sizeof(cmd.request);
283
284         cmd.request.rgid = rgid;
285         cmd.request.vlan_valid = vid_valid;
286         cmd.request.vlan_id = vid;
287
288         return ism_cmd(ism, &cmd);
289 }
290
291 static void ism_free_dmb(struct ism_dev *ism, struct ism_dmb *dmb)
292 {
293         clear_bit(dmb->sba_idx, ism->sba_bitmap);
294         dma_free_coherent(&ism->pdev->dev, dmb->dmb_len,
295                           dmb->cpu_addr, dmb->dma_addr);
296 }
297
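/*
 * Reserve an SBA slot for the DMB (either the index requested by the
 * caller in dmb->sba_idx or the first free bit at or above
 * ISM_DMB_BIT_OFFSET) and allocate the DMB buffer DMA-coherently.  On
 * allocation failure the slot is released again.
 */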
298 static int ism_alloc_dmb(struct ism_dev *ism, struct ism_dmb *dmb)
299 {
300         unsigned long bit;
301
302         if (PAGE_ALIGN(dmb->dmb_len) > dma_get_max_seg_size(&ism->pdev->dev))
303                 return -EINVAL;
304
305         if (!dmb->sba_idx) {
306                 bit = find_next_zero_bit(ism->sba_bitmap, ISM_NR_DMBS,
307                                          ISM_DMB_BIT_OFFSET);
308                 if (bit == ISM_NR_DMBS)
309                         return -ENOSPC;
310
311                 dmb->sba_idx = bit;
312         }
313         if (dmb->sba_idx < ISM_DMB_BIT_OFFSET ||
314             test_and_set_bit(dmb->sba_idx, ism->sba_bitmap))
315                 return -EINVAL;
316
317         dmb->cpu_addr = dma_alloc_coherent(&ism->pdev->dev, dmb->dmb_len,
318                                            &dmb->dma_addr,
319                                            GFP_KERNEL | __GFP_NOWARN |
320                                            __GFP_NOMEMALLOC | __GFP_NORETRY);
321         if (!dmb->cpu_addr)
322                 clear_bit(dmb->sba_idx, ism->sba_bitmap);
323
324         return dmb->cpu_addr ? 0 : -ENOMEM;
325 }
326
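/*
 * Allocate a DMB buffer and register it with the device.  On success the
 * device-assigned token is stored in dmb->dmb_tok and the owning client
 * is recorded in sba_client_arr[] so interrupts for this DMB can be
 * routed to it.
 */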
327 int ism_register_dmb(struct ism_dev *ism, struct ism_dmb *dmb,
328                      struct ism_client *client)
329 {
330         union ism_reg_dmb cmd;
331         int ret;
332
333         ret = ism_alloc_dmb(ism, dmb);
334         if (ret)
335                 goto out;
336
337         memset(&cmd, 0, sizeof(cmd));
338         cmd.request.hdr.cmd = ISM_REG_DMB;
339         cmd.request.hdr.len = sizeof(cmd.request);
340
341         cmd.request.dmb = dmb->dma_addr;
342         cmd.request.dmb_len = dmb->dmb_len;
343         cmd.request.sba_idx = dmb->sba_idx;
344         cmd.request.vlan_valid = dmb->vlan_valid;
345         cmd.request.vlan_id = dmb->vlan_id;
346         cmd.request.rgid = dmb->rgid;
347
348         ret = ism_cmd(ism, &cmd);
349         if (ret) {
350                 ism_free_dmb(ism, dmb);
351                 goto out;
352         }
353         dmb->dmb_tok = cmd.response.dmb_tok;
354         ism->sba_client_arr[dmb->sba_idx - ISM_DMB_BIT_OFFSET] = client->id;
355 out:
356         return ret;
357 }
358 EXPORT_SYMBOL_GPL(ism_register_dmb);
359
360 int ism_unregister_dmb(struct ism_dev *ism, struct ism_dmb *dmb)
361 {
362         union ism_unreg_dmb cmd;
363         int ret;
364
365         memset(&cmd, 0, sizeof(cmd));
366         cmd.request.hdr.cmd = ISM_UNREG_DMB;
367         cmd.request.hdr.len = sizeof(cmd.request);
368
369         cmd.request.dmb_tok = dmb->dmb_tok;
370
371         ism->sba_client_arr[dmb->sba_idx - ISM_DMB_BIT_OFFSET] = NO_CLIENT;
372
373         ret = ism_cmd(ism, &cmd);
374         if (ret && ret != ISM_ERROR)
375                 goto out;
376
377         ism_free_dmb(ism, dmb);
378 out:
379         return ret;
380 }
381 EXPORT_SYMBOL_GPL(ism_unregister_dmb);
382
383 static int ism_add_vlan_id(struct ism_dev *ism, u64 vlan_id)
384 {
385         union ism_set_vlan_id cmd;
386
387         memset(&cmd, 0, sizeof(cmd));
388         cmd.request.hdr.cmd = ISM_ADD_VLAN_ID;
389         cmd.request.hdr.len = sizeof(cmd.request);
390
391         cmd.request.vlan_id = vlan_id;
392
393         return ism_cmd(ism, &cmd);
394 }
395
396 static int ism_del_vlan_id(struct ism_dev *ism, u64 vlan_id)
397 {
398         union ism_set_vlan_id cmd;
399
400         memset(&cmd, 0, sizeof(cmd));
401         cmd.request.hdr.cmd = ISM_DEL_VLAN_ID;
402         cmd.request.hdr.len = sizeof(cmd.request);
403
404         cmd.request.vlan_id = vlan_id;
405
406         return ism_cmd(ism, &cmd);
407 }
408
409 static int ism_signal_ieq(struct ism_dev *ism, u64 rgid, u32 trigger_irq,
410                           u32 event_code, u64 info)
411 {
412         union ism_sig_ieq cmd;
413
414         memset(&cmd, 0, sizeof(cmd));
415         cmd.request.hdr.cmd = ISM_SIGNAL_IEQ;
416         cmd.request.hdr.len = sizeof(cmd.request);
417
418         cmd.request.rgid = rgid;
419         cmd.request.trigger_irq = trigger_irq;
420         cmd.request.event_code = event_code;
421         cmd.request.info = info;
422
423         return ism_cmd(ism, &cmd);
424 }
425
426 static unsigned int max_bytes(unsigned int start, unsigned int len,
427                               unsigned int boundary)
428 {
429         return min(boundary - (start & (boundary - 1)), len);
430 }
431
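/*
 * Copy 'size' bytes from 'data' into the remote DMB identified by
 * dmb_tok/idx, starting at 'offset'.  The transfer is split so that no
 * chunk crosses a page boundary; the signalling flag 'sf' is applied
 * only to the final chunk.
 */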
432 int ism_move(struct ism_dev *ism, u64 dmb_tok, unsigned int idx, bool sf,
433              unsigned int offset, void *data, unsigned int size)
434 {
435         unsigned int bytes;
436         u64 dmb_req;
437         int ret;
438
439         while (size) {
440                 bytes = max_bytes(offset, size, PAGE_SIZE);
441                 dmb_req = ISM_CREATE_REQ(dmb_tok, idx, size == bytes ? sf : 0,
442                                          offset);
443
444                 ret = __ism_move(ism, dmb_req, data, bytes);
445                 if (ret)
446                         return ret;
447
448                 size -= bytes;
449                 data += bytes;
450                 offset += bytes;
451         }
452
453         return 0;
454 }
455 EXPORT_SYMBOL_GPL(ism_move);
456
457 static struct ism_systemeid SYSTEM_EID = {
458         .seid_string = "IBM-SYSZ-ISMSEID00000000",
459         .serial_number = "0000",
460         .type = "0000",
461 };
462
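/*
 * Derive the system EID from the CPU id: the low 16 bits of the ident
 * field become the serial_number and the machine type becomes the type
 * field, both formatted as four uppercase hex digits.  Only called when
 * the hardware reports V2 capability.
 */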
463 static void ism_create_system_eid(void)
464 {
465         struct cpuid id;
466         u16 ident_tail;
467         char tmp[5];
468
469         get_cpu_id(&id);
470         ident_tail = (u16)(id.ident & ISM_IDENT_MASK);
471         snprintf(tmp, 5, "%04X", ident_tail);
472         memcpy(&SYSTEM_EID.serial_number, tmp, 4);
473         snprintf(tmp, 5, "%04X", id.machine);
474         memcpy(&SYSTEM_EID.type, tmp, 4);
475 }
476
477 u8 *ism_get_seid(void)
478 {
479         return SYSTEM_EID.seid_string;
480 }
481 EXPORT_SYMBOL_GPL(ism_get_seid);
482
483 static u16 ism_get_chid(struct ism_dev *ism)
484 {
485         if (!ism || !ism->pdev)
486                 return 0;
487
488         return to_zpci(ism->pdev)->pchid;
489 }
490
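/*
 * Drain newly arrived entries from the IEQ and pass each one to every
 * registered client's handle_event() callback.  ieq_idx tracks how far
 * the driver has consumed the queue; the device-side write index lives
 * in the IEQ header.
 */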
491 static void ism_handle_event(struct ism_dev *ism)
492 {
493         struct ism_event *entry;
494         int i;
495
496         while ((ism->ieq_idx + 1) != READ_ONCE(ism->ieq->header.idx)) {
497                 if (++(ism->ieq_idx) == ARRAY_SIZE(ism->ieq->entry))
498                         ism->ieq_idx = 0;
499
500                 entry = &ism->ieq->entry[ism->ieq_idx];
501                 debug_event(ism_debug_info, 2, entry, sizeof(*entry));
502                 spin_lock(&clients_lock);
503                 for (i = 0; i < max_client; ++i)
504                         if (clients[i])
505                                 clients[i]->handle_event(ism, entry);
506                 spin_unlock(&clients_lock);
507         }
508 }
509
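/*
 * MSI handler: scan the SBA for DMB bits set by the device, clear each
 * bit, latch and reset the corresponding DMB entry mask, and hand it to
 * the owning client's handle_irq() callback.  If the event indicator is
 * set, drain the IEQ as well.
 */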
510 static irqreturn_t ism_handle_irq(int irq, void *data)
511 {
512         struct ism_dev *ism = data;
513         struct ism_client *clt;
514         unsigned long bit, end;
515         unsigned long *bv;
516         u16 dmbemask;
517
518         bv = (void *) &ism->sba->dmb_bits[ISM_DMB_WORD_OFFSET];
519         end = sizeof(ism->sba->dmb_bits) * BITS_PER_BYTE - ISM_DMB_BIT_OFFSET;
520
521         spin_lock(&ism->lock);
522         ism->sba->s = 0;
523         barrier();
524         for (bit = 0;;) {
525                 bit = find_next_bit_inv(bv, end, bit);
526                 if (bit >= end)
527                         break;
528
529                 clear_bit_inv(bit, bv);
530                 dmbemask = ism->sba->dmbe_mask[bit + ISM_DMB_BIT_OFFSET];
531                 ism->sba->dmbe_mask[bit + ISM_DMB_BIT_OFFSET] = 0;
532                 barrier();
533                 clt = clients[ism->sba_client_arr[bit]];
534                 clt->handle_irq(ism, bit + ISM_DMB_BIT_OFFSET, dmbemask);
535         }
536
537         if (ism->sba->e) {
538                 ism->sba->e = 0;
539                 barrier();
540                 ism_handle_event(ism);
541         }
542         spin_unlock(&ism->lock);
543         return IRQ_HANDLED;
544 }
545
546 static u64 ism_get_local_gid(struct ism_dev *ism)
547 {
548         return ism->local_gid;
549 }
550
551 static void ism_dev_add_work_func(struct work_struct *work)
552 {
553         struct ism_client *client = container_of(work, struct ism_client,
554                                                  add_work);
555
556         client->add(client->tgt_ism);
557         atomic_dec(&client->tgt_ism->add_dev_cnt);
558         wake_up(&client->tgt_ism->waitq);
559 }
560
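/*
 * Bring a freshly probed device into service: allocate the MSI vector
 * and the DMB-to-client mapping array, install the interrupt handler,
 * register SBA and IEQ, read the local GID, and probe for V2 capability
 * by adding the reserved VLAN id.  Registered clients are then notified
 * of the new device through add() work items before the device is added
 * to the global device list.
 */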
561 static int ism_dev_init(struct ism_dev *ism)
562 {
563         struct pci_dev *pdev = ism->pdev;
564         unsigned long flags;
565         int i, ret;
566
567         ret = pci_alloc_irq_vectors(pdev, 1, 1, PCI_IRQ_MSI);
568         if (ret <= 0)
569                 goto out;
570
        ism->sba_client_arr = kzalloc(ISM_NR_DMBS, GFP_KERNEL);
        if (!ism->sba_client_arr) {
                ret = -ENOMEM;
                goto free_vectors;
        }
574         memset(ism->sba_client_arr, NO_CLIENT, ISM_NR_DMBS);
575
576         ret = request_irq(pci_irq_vector(pdev, 0), ism_handle_irq, 0,
577                           pci_name(pdev), ism);
578         if (ret)
579                 goto free_client_arr;
580
581         ret = register_sba(ism);
582         if (ret)
583                 goto free_irq;
584
585         ret = register_ieq(ism);
586         if (ret)
587                 goto unreg_sba;
588
589         ret = ism_read_local_gid(ism);
590         if (ret)
591                 goto unreg_ieq;
592
593         if (!ism_add_vlan_id(ism, ISM_RESERVED_VLANID))
594                 /* hardware is V2 capable */
595                 ism_create_system_eid();
596
597         init_waitqueue_head(&ism->waitq);
598         atomic_set(&ism->free_clients_cnt, 0);
599         atomic_set(&ism->add_dev_cnt, 0);
600
601         wait_event(ism->waitq, !atomic_read(&ism->add_dev_cnt));
602         spin_lock_irqsave(&clients_lock, flags);
603         for (i = 0; i < max_client; ++i)
604                 if (clients[i]) {
605                         INIT_WORK(&clients[i]->add_work,
606                                   ism_dev_add_work_func);
607                         clients[i]->tgt_ism = ism;
608                         atomic_inc(&ism->add_dev_cnt);
609                         schedule_work(&clients[i]->add_work);
610                 }
611         spin_unlock_irqrestore(&clients_lock, flags);
612
613         wait_event(ism->waitq, !atomic_read(&ism->add_dev_cnt));
614
615         mutex_lock(&ism_dev_list.mutex);
616         list_add(&ism->list, &ism_dev_list.list);
617         mutex_unlock(&ism_dev_list.mutex);
618
619         query_info(ism);
620         return 0;
621
622 unreg_ieq:
623         unregister_ieq(ism);
624 unreg_sba:
625         unregister_sba(ism);
626 free_irq:
627         free_irq(pci_irq_vector(pdev, 0), ism);
628 free_client_arr:
629         kfree(ism->sba_client_arr);
630 free_vectors:
631         pci_free_irq_vectors(pdev);
632 out:
633         return ret;
634 }
635
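/*
 * PCI probe: allocate the ism_dev, add a child device below the PCI
 * function, enable the function, claim its memory regions, configure a
 * 64-bit DMA mask with 1 MB segment size and boundary, and finish with
 * ism_dev_init().
 */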
636 static int ism_probe(struct pci_dev *pdev, const struct pci_device_id *id)
637 {
638         struct ism_dev *ism;
639         int ret;
640
641         ism = kzalloc(sizeof(*ism), GFP_KERNEL);
642         if (!ism)
643                 return -ENOMEM;
644
645         spin_lock_init(&ism->lock);
646         dev_set_drvdata(&pdev->dev, ism);
647         ism->pdev = pdev;
648         ism->dev.parent = &pdev->dev;
649         device_initialize(&ism->dev);
650         dev_set_name(&ism->dev, "%s", dev_name(&pdev->dev));
651         ret = device_add(&ism->dev);
652         if (ret)
653                 goto err_dev;
654
655         ret = pci_enable_device_mem(pdev);
656         if (ret)
657                 goto err;
658
659         ret = pci_request_mem_regions(pdev, DRV_NAME);
660         if (ret)
661                 goto err_disable;
662
663         ret = dma_set_mask(&pdev->dev, DMA_BIT_MASK(64));
664         if (ret)
665                 goto err_resource;
666
667         dma_set_seg_boundary(&pdev->dev, SZ_1M - 1);
668         dma_set_max_seg_size(&pdev->dev, SZ_1M);
669         pci_set_master(pdev);
670
671         ret = ism_dev_init(ism);
672         if (ret)
673                 goto err_resource;
674
675         return 0;
676
677 err_resource:
678         pci_release_mem_regions(pdev);
679 err_disable:
680         pci_disable_device(pdev);
681 err:
682         device_del(&ism->dev);
683 err_dev:
684         dev_set_drvdata(&pdev->dev, NULL);
685         kfree(ism);
686
687         return ret;
688 }
689
690 static void ism_dev_remove_work_func(struct work_struct *work)
691 {
692         struct ism_client *client = container_of(work, struct ism_client,
693                                                  remove_work);
694
695         client->remove(client->tgt_ism);
696         atomic_dec(&client->tgt_ism->free_clients_cnt);
697         wake_up(&client->tgt_ism->waitq);
698 }
699
700 /* Callers must hold ism_dev_list.mutex */
701 static void ism_dev_exit(struct ism_dev *ism)
702 {
703         struct pci_dev *pdev = ism->pdev;
704         unsigned long flags;
705         int i;
706
707         wait_event(ism->waitq, !atomic_read(&ism->free_clients_cnt));
708         spin_lock_irqsave(&clients_lock, flags);
709         for (i = 0; i < max_client; ++i)
710                 if (clients[i]) {
711                         INIT_WORK(&clients[i]->remove_work,
712                                   ism_dev_remove_work_func);
713                         clients[i]->tgt_ism = ism;
714                         atomic_inc(&ism->free_clients_cnt);
715                         schedule_work(&clients[i]->remove_work);
716                 }
717         spin_unlock_irqrestore(&clients_lock, flags);
718
719         wait_event(ism->waitq, !atomic_read(&ism->free_clients_cnt));
720
721         if (SYSTEM_EID.serial_number[0] != '0' ||
722             SYSTEM_EID.type[0] != '0')
723                 ism_del_vlan_id(ism, ISM_RESERVED_VLANID);
724         unregister_ieq(ism);
725         unregister_sba(ism);
726         free_irq(pci_irq_vector(pdev, 0), ism);
727         kfree(ism->sba_client_arr);
728         pci_free_irq_vectors(pdev);
729         list_del_init(&ism->list);
730 }
731
732 static void ism_remove(struct pci_dev *pdev)
733 {
734         struct ism_dev *ism = dev_get_drvdata(&pdev->dev);
735
736         mutex_lock(&ism_dev_list.mutex);
737         ism_dev_exit(ism);
738         mutex_unlock(&ism_dev_list.mutex);
739
740         pci_release_mem_regions(pdev);
741         pci_disable_device(pdev);
742         device_del(&ism->dev);
743         dev_set_drvdata(&pdev->dev, NULL);
744         kfree(ism);
745 }
746
747 static struct pci_driver ism_driver = {
748         .name     = DRV_NAME,
749         .id_table = ism_device_table,
750         .probe    = ism_probe,
751         .remove   = ism_remove,
752 };
753
754 static int __init ism_init(void)
755 {
756         int ret;
757
758         ism_debug_info = debug_register("ism", 2, 1, 16);
759         if (!ism_debug_info)
760                 return -ENODEV;
761
762         memset(clients, 0, sizeof(clients));
763         max_client = 0;
764         debug_register_view(ism_debug_info, &debug_hex_ascii_view);
765         ret = pci_register_driver(&ism_driver);
766         if (ret)
767                 debug_unregister(ism_debug_info);
768
769         return ret;
770 }
771
772 static void __exit ism_exit(void)
773 {
774         struct ism_dev *ism;
775
776         mutex_lock(&ism_dev_list.mutex);
777         list_for_each_entry(ism, &ism_dev_list.list, list) {
778                 ism_dev_exit(ism);
779         }
780         mutex_unlock(&ism_dev_list.mutex);
781
782         pci_unregister_driver(&ism_driver);
783         debug_unregister(ism_debug_info);
784 }
785
786 module_init(ism_init);
787 module_exit(ism_exit);
788
789 /*************************** SMC-D Implementation *****************************/
790
791 #if IS_ENABLED(CONFIG_SMC)
792 static int smcd_query_rgid(struct smcd_dev *smcd, u64 rgid, u32 vid_valid,
793                            u32 vid)
794 {
795         return ism_query_rgid(smcd->priv, rgid, vid_valid, vid);
796 }
797
798 static int smcd_register_dmb(struct smcd_dev *smcd, struct smcd_dmb *dmb,
799                              struct ism_client *client)
800 {
801         return ism_register_dmb(smcd->priv, (struct ism_dmb *)dmb, client);
802 }
803
804 static int smcd_unregister_dmb(struct smcd_dev *smcd, struct smcd_dmb *dmb)
805 {
806         return ism_unregister_dmb(smcd->priv, (struct ism_dmb *)dmb);
807 }
808
809 static int smcd_add_vlan_id(struct smcd_dev *smcd, u64 vlan_id)
810 {
811         return ism_add_vlan_id(smcd->priv, vlan_id);
812 }
813
814 static int smcd_del_vlan_id(struct smcd_dev *smcd, u64 vlan_id)
815 {
816         return ism_del_vlan_id(smcd->priv, vlan_id);
817 }
818
819 static int smcd_set_vlan_required(struct smcd_dev *smcd)
820 {
821         return ism_cmd_simple(smcd->priv, ISM_SET_VLAN);
822 }
823
824 static int smcd_reset_vlan_required(struct smcd_dev *smcd)
825 {
826         return ism_cmd_simple(smcd->priv, ISM_RESET_VLAN);
827 }
828
829 static int smcd_signal_ieq(struct smcd_dev *smcd, u64 rgid, u32 trigger_irq,
830                            u32 event_code, u64 info)
831 {
832         return ism_signal_ieq(smcd->priv, rgid, trigger_irq, event_code, info);
833 }
834
835 static int smcd_move(struct smcd_dev *smcd, u64 dmb_tok, unsigned int idx,
836                      bool sf, unsigned int offset, void *data,
837                      unsigned int size)
838 {
839         return ism_move(smcd->priv, dmb_tok, idx, sf, offset, data, size);
840 }
841
842 static int smcd_supports_v2(void)
843 {
844         return SYSTEM_EID.serial_number[0] != '0' ||
845                 SYSTEM_EID.type[0] != '0';
846 }
847
848 static u64 smcd_get_local_gid(struct smcd_dev *smcd)
849 {
850         return ism_get_local_gid(smcd->priv);
851 }
852
853 static u16 smcd_get_chid(struct smcd_dev *smcd)
854 {
855         return ism_get_chid(smcd->priv);
856 }
857
858 static inline struct device *smcd_get_dev(struct smcd_dev *dev)
859 {
860         struct ism_dev *ism = dev->priv;
861
862         return &ism->dev;
863 }
864
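/*
 * smcd_ops glue: thin wrappers that let the SMC-D protocol drive the ISM
 * primitives implemented above through its generic callback table.
 */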
865 static const struct smcd_ops ism_ops = {
866         .query_remote_gid = smcd_query_rgid,
867         .register_dmb = smcd_register_dmb,
868         .unregister_dmb = smcd_unregister_dmb,
869         .add_vlan_id = smcd_add_vlan_id,
870         .del_vlan_id = smcd_del_vlan_id,
871         .set_vlan_required = smcd_set_vlan_required,
872         .reset_vlan_required = smcd_reset_vlan_required,
873         .signal_event = smcd_signal_ieq,
874         .move_data = smcd_move,
875         .supports_v2 = smcd_supports_v2,
876         .get_system_eid = ism_get_seid,
877         .get_local_gid = smcd_get_local_gid,
878         .get_chid = smcd_get_chid,
879         .get_dev = smcd_get_dev,
880 };
881
882 const struct smcd_ops *ism_get_smcd_ops(void)
883 {
884         return &ism_ops;
885 }
886 EXPORT_SYMBOL_GPL(ism_get_smcd_ops);
887 #endif