/*
 * This file is provided under a dual BSD/GPLv2 license.  When using or
 *   redistributing this file, you may do so under either license.
 *
 *   GPL LICENSE SUMMARY
 *
 *   Copyright(c) 2012 Intel Corporation. All rights reserved.
 *   Copyright (C) 2015 EMC Corporation. All Rights Reserved.
 *
 *   This program is free software; you can redistribute it and/or modify
 *   it under the terms of version 2 of the GNU General Public License as
 *   published by the Free Software Foundation.
 *
 *   BSD LICENSE
 *
 *   Copyright(c) 2012 Intel Corporation. All rights reserved.
 *   Copyright (C) 2015 EMC Corporation. All Rights Reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Intel Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * Intel PCIe NTB Linux driver
 *
 * Contact Information:
 * Jon Mason <jon.mason@intel.com>
 */

#include <linux/debugfs.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/random.h>
#include <linux/slab.h>
#include <linux/ntb.h>

#include "ntb_hw_intel.h"

#define NTB_NAME        "ntb_hw_intel"
#define NTB_DESC        "Intel(R) PCI-E Non-Transparent Bridge Driver"
#define NTB_VER         "2.0"

MODULE_DESCRIPTION(NTB_DESC);
MODULE_VERSION(NTB_VER);
MODULE_LICENSE("Dual BSD/GPL");
MODULE_AUTHOR("Intel Corporation");

#define bar0_off(base, bar) ((base) + ((bar) << 2))
#define bar2_off(base, bar) bar0_off(base, (bar) - 2)
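
/*
 * Worked example (editorial note): these helpers step 4 bytes per BAR
 * number, so bar2_off(base, 2) == base, bar2_off(base, 4) == base + 8,
 * and bar2_off(base, 5) == base + 12.  That matches one 64-bit
 * xlat/limit register per BAR pair, which splits into two 32-bit halves
 * when BAR4 is split.
 */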

static const struct intel_ntb_reg atom_reg;
static const struct intel_ntb_alt_reg atom_pri_reg;
static const struct intel_ntb_alt_reg atom_sec_reg;
static const struct intel_ntb_alt_reg atom_b2b_reg;
static const struct intel_ntb_xlat_reg atom_pri_xlat;
static const struct intel_ntb_xlat_reg atom_sec_xlat;
static const struct intel_ntb_reg xeon_reg;
static const struct intel_ntb_alt_reg xeon_pri_reg;
static const struct intel_ntb_alt_reg xeon_sec_reg;
static const struct intel_ntb_alt_reg xeon_b2b_reg;
static const struct intel_ntb_xlat_reg xeon_pri_xlat;
static const struct intel_ntb_xlat_reg xeon_sec_xlat;
static struct intel_b2b_addr xeon_b2b_usd_addr;
static struct intel_b2b_addr xeon_b2b_dsd_addr;
static const struct ntb_dev_ops intel_ntb_ops;

static const struct file_operations intel_ntb_debugfs_info;
static struct dentry *debugfs_dir;

static int b2b_mw_idx = -1;
module_param(b2b_mw_idx, int, 0644);
MODULE_PARM_DESC(b2b_mw_idx, "Use this mw idx to access the peer ntb.  A "
                 "value of zero or positive starts from first mw idx, and a "
                 "negative value starts from last mw idx.  Both sides MUST "
                 "set the same value here!");

static unsigned int b2b_mw_share;
module_param(b2b_mw_share, uint, 0644);
MODULE_PARM_DESC(b2b_mw_share, "If the b2b mw is large enough, configure the "
                 "ntb so that the peer ntb only occupies the first half of "
                 "the mw, so the second half can still be used as a mw.  Both "
                 "sides MUST set the same value here!");

module_param_named(xeon_b2b_usd_bar2_addr64,
                   xeon_b2b_usd_addr.bar2_addr64, ullong, 0644);
MODULE_PARM_DESC(xeon_b2b_usd_bar2_addr64,
                 "XEON B2B USD BAR 2 64-bit address");

module_param_named(xeon_b2b_usd_bar4_addr64,
                   xeon_b2b_usd_addr.bar4_addr64, ullong, 0644);
MODULE_PARM_DESC(xeon_b2b_usd_bar4_addr64,
                 "XEON B2B USD BAR 4 64-bit address");

module_param_named(xeon_b2b_usd_bar4_addr32,
                   xeon_b2b_usd_addr.bar4_addr32, ullong, 0644);
MODULE_PARM_DESC(xeon_b2b_usd_bar4_addr32,
                 "XEON B2B USD split-BAR 4 32-bit address");

module_param_named(xeon_b2b_usd_bar5_addr32,
                   xeon_b2b_usd_addr.bar5_addr32, ullong, 0644);
MODULE_PARM_DESC(xeon_b2b_usd_bar5_addr32,
                 "XEON B2B USD split-BAR 5 32-bit address");

module_param_named(xeon_b2b_dsd_bar2_addr64,
                   xeon_b2b_dsd_addr.bar2_addr64, ullong, 0644);
MODULE_PARM_DESC(xeon_b2b_dsd_bar2_addr64,
                 "XEON B2B DSD BAR 2 64-bit address");

module_param_named(xeon_b2b_dsd_bar4_addr64,
                   xeon_b2b_dsd_addr.bar4_addr64, ullong, 0644);
MODULE_PARM_DESC(xeon_b2b_dsd_bar4_addr64,
                 "XEON B2B DSD BAR 4 64-bit address");

module_param_named(xeon_b2b_dsd_bar4_addr32,
                   xeon_b2b_dsd_addr.bar4_addr32, ullong, 0644);
MODULE_PARM_DESC(xeon_b2b_dsd_bar4_addr32,
                 "XEON B2B DSD split-BAR 4 32-bit address");

module_param_named(xeon_b2b_dsd_bar5_addr32,
                   xeon_b2b_dsd_addr.bar5_addr32, ullong, 0644);
MODULE_PARM_DESC(xeon_b2b_dsd_bar5_addr32,
                 "XEON B2B DSD split-BAR 5 32-bit address");

#ifndef ioread64
#ifdef readq
#define ioread64 readq
#else
#define ioread64 _ioread64
static inline u64 _ioread64(void __iomem *mmio)
{
        u64 low, high;

        low = ioread32(mmio);
        high = ioread32(mmio + sizeof(u32));
        return low | (high << 32);
}
#endif
#endif

#ifndef iowrite64
#ifdef writeq
#define iowrite64 writeq
#else
#define iowrite64 _iowrite64
static inline void _iowrite64(u64 val, void __iomem *mmio)
{
        iowrite32(val, mmio);
        iowrite32(val >> 32, mmio + sizeof(u32));
}
#endif
#endif
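
/*
 * Note (editorial): on targets without readq/writeq the fallbacks above
 * split each 64-bit register access into two 32-bit accesses, low dword
 * first.  Such an access is not atomic, so a register that changes
 * between the two 32-bit operations can be observed torn.
 */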

static inline int pdev_is_atom(struct pci_dev *pdev)
{
        switch (pdev->device) {
        case PCI_DEVICE_ID_INTEL_NTB_B2B_BWD:
                return 1;
        }
        return 0;
}

static inline int pdev_is_xeon(struct pci_dev *pdev)
{
        switch (pdev->device) {
        case PCI_DEVICE_ID_INTEL_NTB_SS_JSF:
        case PCI_DEVICE_ID_INTEL_NTB_SS_SNB:
        case PCI_DEVICE_ID_INTEL_NTB_SS_IVT:
        case PCI_DEVICE_ID_INTEL_NTB_SS_HSX:
        case PCI_DEVICE_ID_INTEL_NTB_PS_JSF:
        case PCI_DEVICE_ID_INTEL_NTB_PS_SNB:
        case PCI_DEVICE_ID_INTEL_NTB_PS_IVT:
        case PCI_DEVICE_ID_INTEL_NTB_PS_HSX:
        case PCI_DEVICE_ID_INTEL_NTB_B2B_JSF:
        case PCI_DEVICE_ID_INTEL_NTB_B2B_SNB:
        case PCI_DEVICE_ID_INTEL_NTB_B2B_IVT:
        case PCI_DEVICE_ID_INTEL_NTB_B2B_HSX:
                return 1;
        }
        return 0;
}

static inline void ndev_reset_unsafe_flags(struct intel_ntb_dev *ndev)
{
        ndev->unsafe_flags = 0;
        ndev->unsafe_flags_ignore = 0;

        /* Only B2B has a workaround to avoid SDOORBELL */
        if (ndev->hwerr_flags & NTB_HWERR_SDOORBELL_LOCKUP)
                if (!ntb_topo_is_b2b(ndev->ntb.topo))
                        ndev->unsafe_flags |= NTB_UNSAFE_DB;

        /* No low level workaround to avoid SB01BASE */
        if (ndev->hwerr_flags & NTB_HWERR_SB01BASE_LOCKUP) {
                ndev->unsafe_flags |= NTB_UNSAFE_DB;
                ndev->unsafe_flags |= NTB_UNSAFE_SPAD;
        }
}

static inline int ndev_is_unsafe(struct intel_ntb_dev *ndev,
                                 unsigned long flag)
{
        return !!(flag & ndev->unsafe_flags & ~ndev->unsafe_flags_ignore);
}

static inline int ndev_ignore_unsafe(struct intel_ntb_dev *ndev,
                                     unsigned long flag)
{
        flag &= ndev->unsafe_flags;
        ndev->unsafe_flags_ignore |= flag;

        return !!flag;
}
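
/*
 * Client-side sketch (editorial, not part of this driver): a client that
 * must run on hardware with these errata is expected to acknowledge the
 * condition through the common API, e.g.:
 *
 *      if (ntb_db_is_unsafe(ntb))
 *              dev_warn(&ntb->dev, "doorbells unsafe, continuing\n");
 *
 * ntb_db_is_unsafe() lands in ndev_ignore_unsafe() above, which reports
 * the flag and marks it ignored, so the pr_warn_once() checks in the
 * accessors below stay quiet afterward.
 */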

static int ndev_mw_to_bar(struct intel_ntb_dev *ndev, int idx)
{
        if (idx < 0 || idx >= ndev->mw_count)
                return -EINVAL;
        return ndev->reg->mw_bar[idx];
}

static inline int ndev_db_addr(struct intel_ntb_dev *ndev,
                               phys_addr_t *db_addr, resource_size_t *db_size,
                               phys_addr_t reg_addr, unsigned long reg)
{
        if (ndev_is_unsafe(ndev, NTB_UNSAFE_DB))
                pr_warn_once("%s: NTB unsafe doorbell access", __func__);

        if (db_addr) {
                *db_addr = reg_addr + reg;
                dev_dbg(ndev_dev(ndev), "Peer db addr %llx\n", *db_addr);
        }

        if (db_size) {
                *db_size = ndev->reg->db_size;
                dev_dbg(ndev_dev(ndev), "Peer db size %llx\n", *db_size);
        }

        return 0;
}

static inline u64 ndev_db_read(struct intel_ntb_dev *ndev,
                               void __iomem *mmio)
{
        if (ndev_is_unsafe(ndev, NTB_UNSAFE_DB))
                pr_warn_once("%s: NTB unsafe doorbell access", __func__);

        return ndev->reg->db_ioread(mmio);
}

static inline int ndev_db_write(struct intel_ntb_dev *ndev, u64 db_bits,
                                void __iomem *mmio)
{
        if (ndev_is_unsafe(ndev, NTB_UNSAFE_DB))
                pr_warn_once("%s: NTB unsafe doorbell access", __func__);

        if (db_bits & ~ndev->db_valid_mask)
                return -EINVAL;

        ndev->reg->db_iowrite(db_bits, mmio);

        return 0;
}

static inline int ndev_db_set_mask(struct intel_ntb_dev *ndev, u64 db_bits,
                                   void __iomem *mmio)
{
        unsigned long irqflags;

        if (ndev_is_unsafe(ndev, NTB_UNSAFE_DB))
                pr_warn_once("%s: NTB unsafe doorbell access", __func__);

        if (db_bits & ~ndev->db_valid_mask)
                return -EINVAL;

        spin_lock_irqsave(&ndev->db_mask_lock, irqflags);
        {
                ndev->db_mask |= db_bits;
                ndev->reg->db_iowrite(ndev->db_mask, mmio);
        }
        spin_unlock_irqrestore(&ndev->db_mask_lock, irqflags);

        return 0;
}

static inline int ndev_db_clear_mask(struct intel_ntb_dev *ndev, u64 db_bits,
                                     void __iomem *mmio)
{
        unsigned long irqflags;

        if (ndev_is_unsafe(ndev, NTB_UNSAFE_DB))
                pr_warn_once("%s: NTB unsafe doorbell access", __func__);

        if (db_bits & ~ndev->db_valid_mask)
                return -EINVAL;

        spin_lock_irqsave(&ndev->db_mask_lock, irqflags);
        {
                ndev->db_mask &= ~db_bits;
                ndev->reg->db_iowrite(ndev->db_mask, mmio);
        }
        spin_unlock_irqrestore(&ndev->db_mask_lock, irqflags);

        return 0;
}

static inline u64 ndev_vec_mask(struct intel_ntb_dev *ndev, int db_vector)
{
        u64 shift, mask;

        shift = ndev->db_vec_shift;
        mask = BIT_ULL(shift) - 1;

        return mask << (shift * db_vector);
}

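/*
 * Worked example (editorial): with a db_vec_shift of 5, each vector owns
 * a 5-bit slice of the doorbell register, so ndev_vec_mask(ndev, 1)
 * yields ((1ull << 5) - 1) << 5 == 0x3e0, i.e. doorbell bits 5..9.
 */
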
static inline int ndev_spad_addr(struct intel_ntb_dev *ndev, int idx,
                                 phys_addr_t *spad_addr, phys_addr_t reg_addr,
                                 unsigned long reg)
{
        if (ndev_is_unsafe(ndev, NTB_UNSAFE_SPAD))
                pr_warn_once("%s: NTB unsafe scratchpad access", __func__);

        if (idx < 0 || idx >= ndev->spad_count)
                return -EINVAL;

        if (spad_addr) {
                *spad_addr = reg_addr + reg + (idx << 2);
                dev_dbg(ndev_dev(ndev), "Peer spad addr %llx\n", *spad_addr);
        }

        return 0;
}

static inline u32 ndev_spad_read(struct intel_ntb_dev *ndev, int idx,
                                 void __iomem *mmio)
{
        if (ndev_is_unsafe(ndev, NTB_UNSAFE_SPAD))
                pr_warn_once("%s: NTB unsafe scratchpad access", __func__);

        if (idx < 0 || idx >= ndev->spad_count)
                return 0;

        return ioread32(mmio + (idx << 2));
}

static inline int ndev_spad_write(struct intel_ntb_dev *ndev, int idx, u32 val,
                                  void __iomem *mmio)
{
        if (ndev_is_unsafe(ndev, NTB_UNSAFE_SPAD))
                pr_warn_once("%s: NTB unsafe scratchpad access", __func__);

        if (idx < 0 || idx >= ndev->spad_count)
                return -EINVAL;

        iowrite32(val, mmio + (idx << 2));

        return 0;
}

static irqreturn_t ndev_interrupt(struct intel_ntb_dev *ndev, int vec)
{
        u64 vec_mask;

        vec_mask = ndev_vec_mask(ndev, vec);

        dev_dbg(ndev_dev(ndev), "vec %d vec_mask %llx\n", vec, vec_mask);

        ndev->last_ts = jiffies;

        if (vec_mask & ndev->db_link_mask) {
                if (ndev->reg->poll_link(ndev))
                        ntb_link_event(&ndev->ntb);
        }

        if (vec_mask & ndev->db_valid_mask)
                ntb_db_event(&ndev->ntb, vec);

        return IRQ_HANDLED;
}

static irqreturn_t ndev_vec_isr(int irq, void *dev)
{
        struct intel_ntb_vec *nvec = dev;

        return ndev_interrupt(nvec->ndev, nvec->num);
}

static irqreturn_t ndev_irq_isr(int irq, void *dev)
{
        struct intel_ntb_dev *ndev = dev;

        return ndev_interrupt(ndev, irq - ndev_pdev(ndev)->irq);
}

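/*
 * Note (editorial): ndev_irq_isr() serves the single-vector msi and intx
 * cases, where the only registered irq equals pdev->irq, so the
 * subtraction above always yields vector 0; ndev_init_isr() then sets
 * db_vec_shift to the total shift so that vector 0 covers every doorbell
 * bit.
 */
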
static int ndev_init_isr(struct intel_ntb_dev *ndev,
                         int msix_min, int msix_max,
                         int msix_shift, int total_shift)
{
        struct pci_dev *pdev;
        int rc, i, msix_count, node;

        pdev = ndev_pdev(ndev);

        node = dev_to_node(&pdev->dev);

        /* Mask all doorbell interrupts */
        ndev->db_mask = ndev->db_valid_mask;
        ndev->reg->db_iowrite(ndev->db_mask,
                              ndev->self_mmio +
                              ndev->self_reg->db_mask);

        /* Try to set up msix irq */

        ndev->vec = kzalloc_node(msix_max * sizeof(*ndev->vec),
                                 GFP_KERNEL, node);
        if (!ndev->vec)
                goto err_msix_vec_alloc;

        ndev->msix = kzalloc_node(msix_max * sizeof(*ndev->msix),
                                  GFP_KERNEL, node);
        if (!ndev->msix)
                goto err_msix_alloc;

        for (i = 0; i < msix_max; ++i)
                ndev->msix[i].entry = i;

        msix_count = pci_enable_msix_range(pdev, ndev->msix,
                                           msix_min, msix_max);
        if (msix_count < 0)
                goto err_msix_enable;

        for (i = 0; i < msix_count; ++i) {
                ndev->vec[i].ndev = ndev;
                ndev->vec[i].num = i;
                rc = request_irq(ndev->msix[i].vector, ndev_vec_isr, 0,
                                 "ndev_vec_isr", &ndev->vec[i]);
                if (rc)
                        goto err_msix_request;
        }

        dev_dbg(ndev_dev(ndev), "Using msix interrupts\n");
        ndev->db_vec_count = msix_count;
        ndev->db_vec_shift = msix_shift;
        return 0;

err_msix_request:
        while (i-- > 0)
                free_irq(ndev->msix[i].vector, &ndev->vec[i]);
        pci_disable_msix(pdev);
err_msix_enable:
        kfree(ndev->msix);
err_msix_alloc:
        kfree(ndev->vec);
err_msix_vec_alloc:
        ndev->msix = NULL;
        ndev->vec = NULL;

        /* Try to set up msi irq */

        rc = pci_enable_msi(pdev);
        if (rc)
                goto err_msi_enable;

        rc = request_irq(pdev->irq, ndev_irq_isr, 0,
                         "ndev_irq_isr", ndev);
        if (rc)
                goto err_msi_request;

        dev_dbg(ndev_dev(ndev), "Using msi interrupts\n");
        ndev->db_vec_count = 1;
        ndev->db_vec_shift = total_shift;
        return 0;

err_msi_request:
        pci_disable_msi(pdev);
err_msi_enable:

        /* Try to set up intx irq */

        pci_intx(pdev, 1);

        rc = request_irq(pdev->irq, ndev_irq_isr, IRQF_SHARED,
                         "ndev_irq_isr", ndev);
        if (rc)
                goto err_intx_request;

        dev_dbg(ndev_dev(ndev), "Using intx interrupts\n");
        ndev->db_vec_count = 1;
        ndev->db_vec_shift = total_shift;
        return 0;

err_intx_request:
        return rc;
}

static void ndev_deinit_isr(struct intel_ntb_dev *ndev)
{
        struct pci_dev *pdev;
        int i;

        pdev = ndev_pdev(ndev);

        /* Mask all doorbell interrupts */
        ndev->db_mask = ndev->db_valid_mask;
        ndev->reg->db_iowrite(ndev->db_mask,
                              ndev->self_mmio +
                              ndev->self_reg->db_mask);

        if (ndev->msix) {
                i = ndev->db_vec_count;
                while (i--)
                        free_irq(ndev->msix[i].vector, &ndev->vec[i]);
                pci_disable_msix(pdev);
                kfree(ndev->msix);
                kfree(ndev->vec);
        } else {
                free_irq(pdev->irq, ndev);
                if (pci_dev_msi_enabled(pdev))
                        pci_disable_msi(pdev);
        }
}

static ssize_t ndev_debugfs_read(struct file *filp, char __user *ubuf,
                                 size_t count, loff_t *offp)
{
        struct intel_ntb_dev *ndev;
        void __iomem *mmio;
        char *buf;
        size_t buf_size;
        ssize_t ret, off;
        union { u64 v64; u32 v32; u16 v16; } u;

        ndev = filp->private_data;
        mmio = ndev->self_mmio;

        buf_size = min(count, 0x800ul);

        buf = kmalloc(buf_size, GFP_KERNEL);
        if (!buf)
                return -ENOMEM;

        off = 0;

        off += scnprintf(buf + off, buf_size - off,
                         "NTB Device Information:\n");

        off += scnprintf(buf + off, buf_size - off,
                         "Connection Topology -\t%s\n",
                         ntb_topo_string(ndev->ntb.topo));

        off += scnprintf(buf + off, buf_size - off,
                         "B2B Offset -\t\t%#lx\n", ndev->b2b_off);
        off += scnprintf(buf + off, buf_size - off,
                         "B2B MW Idx -\t\t%d\n", ndev->b2b_idx);
        off += scnprintf(buf + off, buf_size - off,
                         "BAR4 Split -\t\t%s\n",
                         ndev->bar4_split ? "yes" : "no");

        off += scnprintf(buf + off, buf_size - off,
                         "NTB CTL -\t\t%#06x\n", ndev->ntb_ctl);
        off += scnprintf(buf + off, buf_size - off,
                         "LNK STA -\t\t%#06x\n", ndev->lnk_sta);

        if (!ndev->reg->link_is_up(ndev)) {
                off += scnprintf(buf + off, buf_size - off,
                                 "Link Status -\t\tDown\n");
        } else {
                off += scnprintf(buf + off, buf_size - off,
                                 "Link Status -\t\tUp\n");
                off += scnprintf(buf + off, buf_size - off,
                                 "Link Speed -\t\tPCI-E Gen %u\n",
                                 NTB_LNK_STA_SPEED(ndev->lnk_sta));
                off += scnprintf(buf + off, buf_size - off,
                                 "Link Width -\t\tx%u\n",
                                 NTB_LNK_STA_WIDTH(ndev->lnk_sta));
        }

        off += scnprintf(buf + off, buf_size - off,
                         "Memory Window Count -\t%u\n", ndev->mw_count);
        off += scnprintf(buf + off, buf_size - off,
                         "Scratchpad Count -\t%u\n", ndev->spad_count);
        off += scnprintf(buf + off, buf_size - off,
                         "Doorbell Count -\t%u\n", ndev->db_count);
        off += scnprintf(buf + off, buf_size - off,
                         "Doorbell Vector Count -\t%u\n", ndev->db_vec_count);
        off += scnprintf(buf + off, buf_size - off,
                         "Doorbell Vector Shift -\t%u\n", ndev->db_vec_shift);

        off += scnprintf(buf + off, buf_size - off,
                         "Doorbell Valid Mask -\t%#llx\n", ndev->db_valid_mask);
        off += scnprintf(buf + off, buf_size - off,
                         "Doorbell Link Mask -\t%#llx\n", ndev->db_link_mask);
        off += scnprintf(buf + off, buf_size - off,
                         "Doorbell Mask Cached -\t%#llx\n", ndev->db_mask);

        u.v64 = ndev_db_read(ndev, mmio + ndev->self_reg->db_mask);
        off += scnprintf(buf + off, buf_size - off,
                         "Doorbell Mask -\t\t%#llx\n", u.v64);

        u.v64 = ndev_db_read(ndev, mmio + ndev->self_reg->db_bell);
        off += scnprintf(buf + off, buf_size - off,
                         "Doorbell Bell -\t\t%#llx\n", u.v64);

        off += scnprintf(buf + off, buf_size - off,
                         "\nNTB Incoming XLAT:\n");

        u.v64 = ioread64(mmio + bar2_off(ndev->xlat_reg->bar2_xlat, 2));
        off += scnprintf(buf + off, buf_size - off,
                         "XLAT23 -\t\t%#018llx\n", u.v64);

        if (ndev->bar4_split) {
                u.v32 = ioread32(mmio + bar2_off(ndev->xlat_reg->bar2_xlat, 4));
                off += scnprintf(buf + off, buf_size - off,
                                 "XLAT4 -\t\t\t%#06x\n", u.v32);

                u.v32 = ioread32(mmio + bar2_off(ndev->xlat_reg->bar2_xlat, 5));
                off += scnprintf(buf + off, buf_size - off,
                                 "XLAT5 -\t\t\t%#06x\n", u.v32);
        } else {
                u.v64 = ioread64(mmio + bar2_off(ndev->xlat_reg->bar2_xlat, 4));
                off += scnprintf(buf + off, buf_size - off,
                                 "XLAT45 -\t\t%#018llx\n", u.v64);
        }

        u.v64 = ioread64(mmio + bar2_off(ndev->xlat_reg->bar2_limit, 2));
        off += scnprintf(buf + off, buf_size - off,
                         "LMT23 -\t\t\t%#018llx\n", u.v64);

        if (ndev->bar4_split) {
                u.v32 = ioread32(mmio + bar2_off(ndev->xlat_reg->bar2_limit, 4));
                off += scnprintf(buf + off, buf_size - off,
                                 "LMT4 -\t\t\t%#06x\n", u.v32);
                u.v32 = ioread32(mmio + bar2_off(ndev->xlat_reg->bar2_limit, 5));
                off += scnprintf(buf + off, buf_size - off,
                                 "LMT5 -\t\t\t%#06x\n", u.v32);
        } else {
                u.v64 = ioread64(mmio + bar2_off(ndev->xlat_reg->bar2_limit, 4));
                off += scnprintf(buf + off, buf_size - off,
                                 "LMT45 -\t\t\t%#018llx\n", u.v64);
        }

        if (pdev_is_xeon(ndev->ntb.pdev)) {
                if (ntb_topo_is_b2b(ndev->ntb.topo)) {
                        off += scnprintf(buf + off, buf_size - off,
                                         "\nNTB Outgoing B2B XLAT:\n");

                        u.v64 = ioread64(mmio + XEON_PBAR23XLAT_OFFSET);
                        off += scnprintf(buf + off, buf_size - off,
                                         "B2B XLAT23 -\t\t%#018llx\n", u.v64);

                        if (ndev->bar4_split) {
                                u.v32 = ioread32(mmio + XEON_PBAR4XLAT_OFFSET);
                                off += scnprintf(buf + off, buf_size - off,
                                                 "B2B XLAT4 -\t\t%#06x\n",
                                                 u.v32);
                                u.v32 = ioread32(mmio + XEON_PBAR5XLAT_OFFSET);
                                off += scnprintf(buf + off, buf_size - off,
                                                 "B2B XLAT5 -\t\t%#06x\n",
                                                 u.v32);
                        } else {
                                u.v64 = ioread64(mmio + XEON_PBAR45XLAT_OFFSET);
                                off += scnprintf(buf + off, buf_size - off,
                                                 "B2B XLAT45 -\t\t%#018llx\n",
                                                 u.v64);
                        }

                        u.v64 = ioread64(mmio + XEON_PBAR23LMT_OFFSET);
                        off += scnprintf(buf + off, buf_size - off,
                                         "B2B LMT23 -\t\t%#018llx\n", u.v64);

                        if (ndev->bar4_split) {
                                u.v32 = ioread32(mmio + XEON_PBAR4LMT_OFFSET);
                                off += scnprintf(buf + off, buf_size - off,
                                                 "B2B LMT4 -\t\t%#06x\n",
                                                 u.v32);
                                u.v32 = ioread32(mmio + XEON_PBAR5LMT_OFFSET);
                                off += scnprintf(buf + off, buf_size - off,
                                                 "B2B LMT5 -\t\t%#06x\n",
                                                 u.v32);
                        } else {
                                u.v64 = ioread64(mmio + XEON_PBAR45LMT_OFFSET);
                                off += scnprintf(buf + off, buf_size - off,
                                                 "B2B LMT45 -\t\t%#018llx\n",
                                                 u.v64);
                        }

                        off += scnprintf(buf + off, buf_size - off,
                                         "\nNTB Secondary BAR:\n");

                        u.v64 = ioread64(mmio + XEON_SBAR0BASE_OFFSET);
                        off += scnprintf(buf + off, buf_size - off,
                                         "SBAR01 -\t\t%#018llx\n", u.v64);

                        u.v64 = ioread64(mmio + XEON_SBAR23BASE_OFFSET);
                        off += scnprintf(buf + off, buf_size - off,
                                         "SBAR23 -\t\t%#018llx\n", u.v64);

                        if (ndev->bar4_split) {
                                u.v32 = ioread32(mmio + XEON_SBAR4BASE_OFFSET);
                                off += scnprintf(buf + off, buf_size - off,
                                                 "SBAR4 -\t\t\t%#06x\n", u.v32);
                                u.v32 = ioread32(mmio + XEON_SBAR5BASE_OFFSET);
                                off += scnprintf(buf + off, buf_size - off,
                                                 "SBAR5 -\t\t\t%#06x\n", u.v32);
                        } else {
                                u.v64 = ioread64(mmio + XEON_SBAR45BASE_OFFSET);
                                off += scnprintf(buf + off, buf_size - off,
                                                 "SBAR45 -\t\t%#018llx\n",
                                                 u.v64);
                        }
                }

                off += scnprintf(buf + off, buf_size - off,
                                 "\nXEON NTB Statistics:\n");

                u.v16 = ioread16(mmio + XEON_USMEMMISS_OFFSET);
                off += scnprintf(buf + off, buf_size - off,
                                 "Upstream Memory Miss -\t%u\n", u.v16);

                off += scnprintf(buf + off, buf_size - off,
                                 "\nXEON NTB Hardware Errors:\n");

                if (!pci_read_config_word(ndev->ntb.pdev,
                                          XEON_DEVSTS_OFFSET, &u.v16))
                        off += scnprintf(buf + off, buf_size - off,
                                         "DEVSTS -\t\t%#06x\n", u.v16);

                if (!pci_read_config_word(ndev->ntb.pdev,
                                          XEON_LINK_STATUS_OFFSET, &u.v16))
                        off += scnprintf(buf + off, buf_size - off,
                                         "LNKSTS -\t\t%#06x\n", u.v16);

                if (!pci_read_config_dword(ndev->ntb.pdev,
                                           XEON_UNCERRSTS_OFFSET, &u.v32))
                        off += scnprintf(buf + off, buf_size - off,
                                         "UNCERRSTS -\t\t%#06x\n", u.v32);

                if (!pci_read_config_dword(ndev->ntb.pdev,
                                           XEON_CORERRSTS_OFFSET, &u.v32))
                        off += scnprintf(buf + off, buf_size - off,
                                         "CORERRSTS -\t\t%#06x\n", u.v32);
        }

        ret = simple_read_from_buffer(ubuf, count, offp, buf, off);
        kfree(buf);
        return ret;
}
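
/*
 * Usage sketch (editorial, exact path depends on how debugfs_dir is
 * created at module init and on the device's PCI address):
 *
 *   cat /sys/kernel/debug/ntb_hw_intel/0000:02:00.0/info
 *
 * The per-device directory name comes from ndev_name() in
 * ndev_init_debugfs() below.
 */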

static void ndev_init_debugfs(struct intel_ntb_dev *ndev)
{
        if (!debugfs_dir) {
                ndev->debugfs_dir = NULL;
                ndev->debugfs_info = NULL;
        } else {
                ndev->debugfs_dir =
                        debugfs_create_dir(ndev_name(ndev), debugfs_dir);
                if (!ndev->debugfs_dir)
                        ndev->debugfs_info = NULL;
                else
                        ndev->debugfs_info =
                                debugfs_create_file("info", S_IRUSR,
                                                    ndev->debugfs_dir, ndev,
                                                    &intel_ntb_debugfs_info);
        }
}

static void ndev_deinit_debugfs(struct intel_ntb_dev *ndev)
{
        debugfs_remove_recursive(ndev->debugfs_dir);
}

static int intel_ntb_mw_count(struct ntb_dev *ntb)
{
        return ntb_ndev(ntb)->mw_count;
}

static int intel_ntb_mw_get_range(struct ntb_dev *ntb, int idx,
                                  phys_addr_t *base,
                                  resource_size_t *size,
                                  resource_size_t *align,
                                  resource_size_t *align_size)
{
        struct intel_ntb_dev *ndev = ntb_ndev(ntb);
        int bar;

        if (idx >= ndev->b2b_idx && !ndev->b2b_off)
                idx += 1;

        bar = ndev_mw_to_bar(ndev, idx);
        if (bar < 0)
                return bar;

        if (base)
                *base = pci_resource_start(ndev->ntb.pdev, bar) +
                        (idx == ndev->b2b_idx ? ndev->b2b_off : 0);

        if (size)
                *size = pci_resource_len(ndev->ntb.pdev, bar) -
                        (idx == ndev->b2b_idx ? ndev->b2b_off : 0);

        if (align)
                *align = pci_resource_len(ndev->ntb.pdev, bar);

        if (align_size)
                *align_size = 1;

        return 0;
}
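
/*
 * Client-side sketch (editorial, not part of this driver): a typical
 * client discovers an inbound window through the common ntb.h wrapper of
 * the op above, then allocates a suitably aligned buffer:
 *
 *      phys_addr_t base;
 *      resource_size_t size, align, align_size;
 *
 *      ntb_mw_get_range(ntb, 0, &base, &size, &align, &align_size);
 *      buf = dma_alloc_coherent(&pdev->dev, size, &dma_addr, GFP_KERNEL);
 *
 * align is the BAR size here because the hardware requires the
 * translated address to be aligned to it (see intel_ntb_mw_set_trans()).
 */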

static int intel_ntb_mw_set_trans(struct ntb_dev *ntb, int idx,
                                  dma_addr_t addr, resource_size_t size)
{
        struct intel_ntb_dev *ndev = ntb_ndev(ntb);
        unsigned long base_reg, xlat_reg, limit_reg;
        resource_size_t bar_size, mw_size;
        void __iomem *mmio;
        u64 base, limit, reg_val;
        int bar;

        if (idx >= ndev->b2b_idx && !ndev->b2b_off)
                idx += 1;

        bar = ndev_mw_to_bar(ndev, idx);
        if (bar < 0)
                return bar;

        bar_size = pci_resource_len(ndev->ntb.pdev, bar);

        if (idx == ndev->b2b_idx)
                mw_size = bar_size - ndev->b2b_off;
        else
                mw_size = bar_size;

        /* hardware requires that addr is aligned to bar size */
        if (addr & (bar_size - 1))
                return -EINVAL;

        /* make sure the range fits in the usable mw size */
        if (size > mw_size)
                return -EINVAL;

        mmio = ndev->self_mmio;
        base_reg = bar0_off(ndev->xlat_reg->bar0_base, bar);
        xlat_reg = bar2_off(ndev->xlat_reg->bar2_xlat, bar);
        limit_reg = bar2_off(ndev->xlat_reg->bar2_limit, bar);

        if (bar < 4 || !ndev->bar4_split) {
                base = ioread64(mmio + base_reg);

                /* Set the limit if supported, if size is not mw_size */
                if (limit_reg && size != mw_size)
                        limit = base + size;
                else
                        limit = 0;

                /* set and verify setting the translation address */
                iowrite64(addr, mmio + xlat_reg);
                reg_val = ioread64(mmio + xlat_reg);
                if (reg_val != addr) {
                        iowrite64(0, mmio + xlat_reg);
                        return -EIO;
                }

                /* set and verify setting the limit */
                iowrite64(limit, mmio + limit_reg);
                reg_val = ioread64(mmio + limit_reg);
                if (reg_val != limit) {
                        iowrite64(base, mmio + limit_reg);
                        iowrite64(0, mmio + xlat_reg);
                        return -EIO;
                }
        } else {
                /* split bar addr range must all be 32 bit */
                if (addr & (~0ull << 32))
                        return -EINVAL;
                if ((addr + size) & (~0ull << 32))
                        return -EINVAL;

                base = ioread32(mmio + base_reg);

                /* Set the limit if supported, if size is not mw_size */
                if (limit_reg && size != mw_size)
                        limit = base + size;
                else
                        limit = 0;

                /* set and verify setting the translation address */
                iowrite32(addr, mmio + xlat_reg);
                reg_val = ioread32(mmio + xlat_reg);
                if (reg_val != addr) {
                        iowrite32(0, mmio + xlat_reg);
                        return -EIO;
                }

                /* set and verify setting the limit */
                iowrite32(limit, mmio + limit_reg);
                reg_val = ioread32(mmio + limit_reg);
                if (reg_val != limit) {
                        iowrite32(base, mmio + limit_reg);
                        iowrite32(0, mmio + xlat_reg);
                        return -EIO;
                }
        }

        return 0;
}
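
/*
 * Flow sketch (editorial): once a client holds a DMA address aligned to
 * the BAR size, programming the window through the common wrapper is:
 *
 *      rc = ntb_mw_set_trans(ntb, 0, dma_addr, size);
 *
 * which lands in the op above: it writes the translation, reads it back
 * to verify the register latched the value, and programs a limit only
 * when size is smaller than the usable window.
 */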

static int intel_ntb_link_is_up(struct ntb_dev *ntb,
                                enum ntb_speed *speed,
                                enum ntb_width *width)
{
        struct intel_ntb_dev *ndev = ntb_ndev(ntb);

        if (ndev->reg->link_is_up(ndev)) {
                if (speed)
                        *speed = NTB_LNK_STA_SPEED(ndev->lnk_sta);
                if (width)
                        *width = NTB_LNK_STA_WIDTH(ndev->lnk_sta);
                return 1;
        } else {
                /* TODO MAYBE: is it possible to observe the link speed and
                 * width while link is training? */
                if (speed)
                        *speed = NTB_SPEED_NONE;
                if (width)
                        *width = NTB_WIDTH_NONE;
                return 0;
        }
}

static int intel_ntb_link_enable(struct ntb_dev *ntb,
                                 enum ntb_speed max_speed,
                                 enum ntb_width max_width)
{
        struct intel_ntb_dev *ndev;
        u32 ntb_ctl;

        ndev = container_of(ntb, struct intel_ntb_dev, ntb);

        if (ndev->ntb.topo == NTB_TOPO_SEC)
                return -EINVAL;

        dev_dbg(ndev_dev(ndev),
                "Enabling link with max_speed %d max_width %d\n",
                max_speed, max_width);
        if (max_speed != NTB_SPEED_AUTO)
                dev_dbg(ndev_dev(ndev), "ignoring max_speed %d\n", max_speed);
        if (max_width != NTB_WIDTH_AUTO)
                dev_dbg(ndev_dev(ndev), "ignoring max_width %d\n", max_width);

        ntb_ctl = ioread32(ndev->self_mmio + ndev->reg->ntb_ctl);
        ntb_ctl &= ~(NTB_CTL_DISABLE | NTB_CTL_CFG_LOCK);
        ntb_ctl |= NTB_CTL_P2S_BAR2_SNOOP | NTB_CTL_S2P_BAR2_SNOOP;
        ntb_ctl |= NTB_CTL_P2S_BAR4_SNOOP | NTB_CTL_S2P_BAR4_SNOOP;
        if (ndev->bar4_split)
                ntb_ctl |= NTB_CTL_P2S_BAR5_SNOOP | NTB_CTL_S2P_BAR5_SNOOP;
        iowrite32(ntb_ctl, ndev->self_mmio + ndev->reg->ntb_ctl);

        return 0;
}

static int intel_ntb_link_disable(struct ntb_dev *ntb)
{
        struct intel_ntb_dev *ndev;
        u32 ntb_cntl;

        ndev = container_of(ntb, struct intel_ntb_dev, ntb);

        if (ndev->ntb.topo == NTB_TOPO_SEC)
                return -EINVAL;

        dev_dbg(ndev_dev(ndev), "Disabling link\n");

        /* Bring NTB link down */
        ntb_cntl = ioread32(ndev->self_mmio + ndev->reg->ntb_ctl);
        ntb_cntl &= ~(NTB_CTL_P2S_BAR2_SNOOP | NTB_CTL_S2P_BAR2_SNOOP);
        ntb_cntl &= ~(NTB_CTL_P2S_BAR4_SNOOP | NTB_CTL_S2P_BAR4_SNOOP);
        if (ndev->bar4_split)
                ntb_cntl &= ~(NTB_CTL_P2S_BAR5_SNOOP | NTB_CTL_S2P_BAR5_SNOOP);
        ntb_cntl |= NTB_CTL_DISABLE | NTB_CTL_CFG_LOCK;
        iowrite32(ntb_cntl, ndev->self_mmio + ndev->reg->ntb_ctl);

        return 0;
}

static int intel_ntb_db_is_unsafe(struct ntb_dev *ntb)
{
        return ndev_ignore_unsafe(ntb_ndev(ntb), NTB_UNSAFE_DB);
}

static u64 intel_ntb_db_valid_mask(struct ntb_dev *ntb)
{
        return ntb_ndev(ntb)->db_valid_mask;
}

static int intel_ntb_db_vector_count(struct ntb_dev *ntb)
{
        struct intel_ntb_dev *ndev;

        ndev = container_of(ntb, struct intel_ntb_dev, ntb);

        return ndev->db_vec_count;
}

static u64 intel_ntb_db_vector_mask(struct ntb_dev *ntb, int db_vector)
{
        struct intel_ntb_dev *ndev = ntb_ndev(ntb);

        if (db_vector < 0 || db_vector > ndev->db_vec_count)
                return 0;

        return ndev->db_valid_mask & ndev_vec_mask(ndev, db_vector);
}

static u64 intel_ntb_db_read(struct ntb_dev *ntb)
{
        struct intel_ntb_dev *ndev = ntb_ndev(ntb);

        return ndev_db_read(ndev,
                            ndev->self_mmio +
                            ndev->self_reg->db_bell);
}

static int intel_ntb_db_clear(struct ntb_dev *ntb, u64 db_bits)
{
        struct intel_ntb_dev *ndev = ntb_ndev(ntb);

        return ndev_db_write(ndev, db_bits,
                             ndev->self_mmio +
                             ndev->self_reg->db_bell);
}

static int intel_ntb_db_set_mask(struct ntb_dev *ntb, u64 db_bits)
{
        struct intel_ntb_dev *ndev = ntb_ndev(ntb);

        return ndev_db_set_mask(ndev, db_bits,
                                ndev->self_mmio +
                                ndev->self_reg->db_mask);
}

static int intel_ntb_db_clear_mask(struct ntb_dev *ntb, u64 db_bits)
{
        struct intel_ntb_dev *ndev = ntb_ndev(ntb);

        return ndev_db_clear_mask(ndev, db_bits,
                                  ndev->self_mmio +
                                  ndev->self_reg->db_mask);
}

static int intel_ntb_peer_db_addr(struct ntb_dev *ntb,
                                  phys_addr_t *db_addr,
                                  resource_size_t *db_size)
{
        struct intel_ntb_dev *ndev = ntb_ndev(ntb);

        return ndev_db_addr(ndev, db_addr, db_size, ndev->peer_addr,
                            ndev->peer_reg->db_bell);
}

static int intel_ntb_peer_db_set(struct ntb_dev *ntb, u64 db_bits)
{
        struct intel_ntb_dev *ndev = ntb_ndev(ntb);

        return ndev_db_write(ndev, db_bits,
                             ndev->peer_mmio +
                             ndev->peer_reg->db_bell);
}

static int intel_ntb_spad_is_unsafe(struct ntb_dev *ntb)
{
        return ndev_ignore_unsafe(ntb_ndev(ntb), NTB_UNSAFE_SPAD);
}

static int intel_ntb_spad_count(struct ntb_dev *ntb)
{
        struct intel_ntb_dev *ndev;

        ndev = container_of(ntb, struct intel_ntb_dev, ntb);

        return ndev->spad_count;
}

static u32 intel_ntb_spad_read(struct ntb_dev *ntb, int idx)
{
        struct intel_ntb_dev *ndev = ntb_ndev(ntb);

        return ndev_spad_read(ndev, idx,
                              ndev->self_mmio +
                              ndev->self_reg->spad);
}

static int intel_ntb_spad_write(struct ntb_dev *ntb,
                                int idx, u32 val)
{
        struct intel_ntb_dev *ndev = ntb_ndev(ntb);

        return ndev_spad_write(ndev, idx, val,
                               ndev->self_mmio +
                               ndev->self_reg->spad);
}

static int intel_ntb_peer_spad_addr(struct ntb_dev *ntb, int idx,
                                    phys_addr_t *spad_addr)
{
        struct intel_ntb_dev *ndev = ntb_ndev(ntb);

        return ndev_spad_addr(ndev, idx, spad_addr, ndev->peer_addr,
                              ndev->peer_reg->spad);
}

static u32 intel_ntb_peer_spad_read(struct ntb_dev *ntb, int idx)
{
        struct intel_ntb_dev *ndev = ntb_ndev(ntb);

        return ndev_spad_read(ndev, idx,
                              ndev->peer_mmio +
                              ndev->peer_reg->spad);
}

static int intel_ntb_peer_spad_write(struct ntb_dev *ntb,
                                     int idx, u32 val)
{
        struct intel_ntb_dev *ndev = ntb_ndev(ntb);

        return ndev_spad_write(ndev, idx, val,
                               ndev->peer_mmio +
                               ndev->peer_reg->spad);
}

/* ATOM */

static u64 atom_db_ioread(void __iomem *mmio)
{
        return ioread64(mmio);
}

static void atom_db_iowrite(u64 bits, void __iomem *mmio)
{
        iowrite64(bits, mmio);
}

static int atom_poll_link(struct intel_ntb_dev *ndev)
{
        u32 ntb_ctl;

        ntb_ctl = ioread32(ndev->self_mmio + ATOM_NTBCNTL_OFFSET);

        if (ntb_ctl == ndev->ntb_ctl)
                return 0;

        ndev->ntb_ctl = ntb_ctl;

        ndev->lnk_sta = ioread32(ndev->self_mmio + ATOM_LINK_STATUS_OFFSET);

        return 1;
}

static int atom_link_is_up(struct intel_ntb_dev *ndev)
{
        return ATOM_NTB_CTL_ACTIVE(ndev->ntb_ctl);
}

static int atom_link_is_err(struct intel_ntb_dev *ndev)
{
        if (ioread32(ndev->self_mmio + ATOM_LTSSMSTATEJMP_OFFSET)
            & ATOM_LTSSMSTATEJMP_FORCEDETECT)
                return 1;

        if (ioread32(ndev->self_mmio + ATOM_IBSTERRRCRVSTS0_OFFSET)
            & ATOM_IBIST_ERR_OFLOW)
                return 1;

        return 0;
}

static inline enum ntb_topo atom_ppd_topo(struct intel_ntb_dev *ndev, u32 ppd)
{
        switch (ppd & ATOM_PPD_TOPO_MASK) {
        case ATOM_PPD_TOPO_B2B_USD:
                dev_dbg(ndev_dev(ndev), "PPD %d B2B USD\n", ppd);
                return NTB_TOPO_B2B_USD;

        case ATOM_PPD_TOPO_B2B_DSD:
                dev_dbg(ndev_dev(ndev), "PPD %d B2B DSD\n", ppd);
                return NTB_TOPO_B2B_DSD;

        case ATOM_PPD_TOPO_PRI_USD:
        case ATOM_PPD_TOPO_PRI_DSD: /* accept bogus PRI_DSD */
        case ATOM_PPD_TOPO_SEC_USD:
        case ATOM_PPD_TOPO_SEC_DSD: /* accept bogus SEC_DSD */
                dev_dbg(ndev_dev(ndev), "PPD %d non B2B disabled\n", ppd);
                return NTB_TOPO_NONE;
        }

        dev_dbg(ndev_dev(ndev), "PPD %d invalid\n", ppd);
        return NTB_TOPO_NONE;
}

static void atom_link_hb(struct work_struct *work)
{
        struct intel_ntb_dev *ndev = hb_ndev(work);
        unsigned long poll_ts;
        void __iomem *mmio;
        u32 status32;

        poll_ts = ndev->last_ts + ATOM_LINK_HB_TIMEOUT;

        /* Delay polling the link status if an interrupt was received,
         * unless the cached link status says the link is down.
         */
        if (time_after(poll_ts, jiffies) && atom_link_is_up(ndev)) {
                schedule_delayed_work(&ndev->hb_timer, poll_ts - jiffies);
                return;
        }

        if (atom_poll_link(ndev))
                ntb_link_event(&ndev->ntb);

        if (atom_link_is_up(ndev) || !atom_link_is_err(ndev)) {
                schedule_delayed_work(&ndev->hb_timer, ATOM_LINK_HB_TIMEOUT);
                return;
        }

        /* Link is down with error: recover the link! */

        mmio = ndev->self_mmio;

        /* Driver resets the NTB ModPhy lanes - magic! */
        iowrite8(0xe0, mmio + ATOM_MODPHY_PCSREG6);
        iowrite8(0x40, mmio + ATOM_MODPHY_PCSREG4);
        iowrite8(0x60, mmio + ATOM_MODPHY_PCSREG4);
        iowrite8(0x60, mmio + ATOM_MODPHY_PCSREG6);

        /* Driver waits 100ms to allow the NTB ModPhy to settle */
        msleep(100);

        /* Clear AER Errors, write to clear */
        status32 = ioread32(mmio + ATOM_ERRCORSTS_OFFSET);
        dev_dbg(ndev_dev(ndev), "ERRCORSTS = %x\n", status32);
        status32 &= PCI_ERR_COR_REP_ROLL;
        iowrite32(status32, mmio + ATOM_ERRCORSTS_OFFSET);

        /* Clear unexpected electrical idle event in LTSSM, write to clear */
        status32 = ioread32(mmio + ATOM_LTSSMERRSTS0_OFFSET);
        dev_dbg(ndev_dev(ndev), "LTSSMERRSTS0 = %x\n", status32);
        status32 |= ATOM_LTSSMERRSTS0_UNEXPECTEDEI;
        iowrite32(status32, mmio + ATOM_LTSSMERRSTS0_OFFSET);

        /* Clear DeSkew Buffer error, write to clear */
        status32 = ioread32(mmio + ATOM_DESKEWSTS_OFFSET);
        dev_dbg(ndev_dev(ndev), "DESKEWSTS = %x\n", status32);
        status32 |= ATOM_DESKEWSTS_DBERR;
        iowrite32(status32, mmio + ATOM_DESKEWSTS_OFFSET);

        status32 = ioread32(mmio + ATOM_IBSTERRRCRVSTS0_OFFSET);
        dev_dbg(ndev_dev(ndev), "IBSTERRRCRVSTS0 = %x\n", status32);
        status32 &= ATOM_IBIST_ERR_OFLOW;
        iowrite32(status32, mmio + ATOM_IBSTERRRCRVSTS0_OFFSET);

        /* Releases the NTB state machine to allow the link to retrain */
        status32 = ioread32(mmio + ATOM_LTSSMSTATEJMP_OFFSET);
        dev_dbg(ndev_dev(ndev), "LTSSMSTATEJMP = %x\n", status32);
        status32 &= ~ATOM_LTSSMSTATEJMP_FORCEDETECT;
        iowrite32(status32, mmio + ATOM_LTSSMSTATEJMP_OFFSET);

        /* There is a potential race between the 2 NTB devices recovering at the
         * same time.  If the times are the same, the link will not recover and
         * the driver will be stuck in this loop forever.  Add a random interval
         * to the recovery time to prevent this race.
         */
        schedule_delayed_work(&ndev->hb_timer, ATOM_LINK_RECOVERY_TIME
                              + prandom_u32() % ATOM_LINK_RECOVERY_TIME);
}
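
/*
 * Worked example (editorial): if ATOM_LINK_RECOVERY_TIME corresponded to
 * 500ms, the reschedule above would land uniformly in [500ms, 1000ms),
 * so two peers that entered recovery in the same tick are unlikely to
 * keep colliding on subsequent retries.
 */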
1299
1300 static int atom_init_isr(struct intel_ntb_dev *ndev)
1301 {
1302         int rc;
1303
1304         rc = ndev_init_isr(ndev, 1, ATOM_DB_MSIX_VECTOR_COUNT,
1305                            ATOM_DB_MSIX_VECTOR_SHIFT, ATOM_DB_TOTAL_SHIFT);
1306         if (rc)
1307                 return rc;
1308
1309         /* ATOM doesn't have link status interrupt, poll on that platform */
1310         ndev->last_ts = jiffies;
1311         INIT_DELAYED_WORK(&ndev->hb_timer, atom_link_hb);
1312         schedule_delayed_work(&ndev->hb_timer, ATOM_LINK_HB_TIMEOUT);
1313
1314         return 0;
1315 }
1316
1317 static void atom_deinit_isr(struct intel_ntb_dev *ndev)
1318 {
1319         cancel_delayed_work_sync(&ndev->hb_timer);
1320         ndev_deinit_isr(ndev);
1321 }
1322
1323 static int atom_init_ntb(struct intel_ntb_dev *ndev)
1324 {
1325         ndev->mw_count = ATOM_MW_COUNT;
1326         ndev->spad_count = ATOM_SPAD_COUNT;
1327         ndev->db_count = ATOM_DB_COUNT;
1328
1329         switch (ndev->ntb.topo) {
1330         case NTB_TOPO_B2B_USD:
1331         case NTB_TOPO_B2B_DSD:
1332                 ndev->self_reg = &atom_pri_reg;
1333                 ndev->peer_reg = &atom_b2b_reg;
1334                 ndev->xlat_reg = &atom_sec_xlat;
1335
1336                 /* Enable Bus Master and Memory Space on the secondary side */
1337                 iowrite16(PCI_COMMAND_MEMORY | PCI_COMMAND_MASTER,
1338                           ndev->self_mmio + ATOM_SPCICMD_OFFSET);
1339
1340                 break;
1341
1342         default:
1343                 return -EINVAL;
1344         }
1345
1346         ndev->db_valid_mask = BIT_ULL(ndev->db_count) - 1;
1347
1348         return 0;
1349 }
1350
1351 static int atom_init_dev(struct intel_ntb_dev *ndev)
1352 {
1353         u32 ppd;
1354         int rc;
1355
1356         rc = pci_read_config_dword(ndev->ntb.pdev, ATOM_PPD_OFFSET, &ppd);
1357         if (rc)
1358                 return -EIO;
1359
1360         ndev->ntb.topo = atom_ppd_topo(ndev, ppd);
1361         if (ndev->ntb.topo == NTB_TOPO_NONE)
1362                 return -EINVAL;
1363
1364         rc = atom_init_ntb(ndev);
1365         if (rc)
1366                 return rc;
1367
1368         rc = atom_init_isr(ndev);
1369         if (rc)
1370                 return rc;
1371
1372         if (ndev->ntb.topo != NTB_TOPO_SEC) {
1373                 /* Initiate PCI-E link training */
1374                 rc = pci_write_config_dword(ndev->ntb.pdev, ATOM_PPD_OFFSET,
1375                                             ppd | ATOM_PPD_INIT_LINK);
1376                 if (rc)
1377                         return rc;
1378         }
1379
1380         return 0;
1381 }
1382
1383 static void atom_deinit_dev(struct intel_ntb_dev *ndev)
1384 {
1385         atom_deinit_isr(ndev);
1386 }
1387
1388 /* XEON */
1389
1390 static u64 xeon_db_ioread(void __iomem *mmio)
1391 {
1392         return (u64)ioread16(mmio);
1393 }
1394
1395 static void xeon_db_iowrite(u64 bits, void __iomem *mmio)
1396 {
1397         iowrite16((u16)bits, mmio);
1398 }
1399
1400 static int xeon_poll_link(struct intel_ntb_dev *ndev)
1401 {
1402         u16 reg_val;
1403         int rc;
1404
1405         ndev->reg->db_iowrite(ndev->db_link_mask,
1406                               ndev->self_mmio +
1407                               ndev->self_reg->db_bell);
1408
1409         rc = pci_read_config_word(ndev->ntb.pdev,
1410                                   XEON_LINK_STATUS_OFFSET, &reg_val);
1411         if (rc)
1412                 return 0;
1413
1414         if (reg_val == ndev->lnk_sta)
1415                 return 0;
1416
1417         ndev->lnk_sta = reg_val;
1418
1419         return 1;
1420 }
1421
1422 static int xeon_link_is_up(struct intel_ntb_dev *ndev)
1423 {
1424         if (ndev->ntb.topo == NTB_TOPO_SEC)
1425                 return 1;
1426
1427         return NTB_LNK_STA_ACTIVE(ndev->lnk_sta);
1428 }
1429
1430 static inline enum ntb_topo xeon_ppd_topo(struct intel_ntb_dev *ndev, u8 ppd)
1431 {
1432         switch (ppd & XEON_PPD_TOPO_MASK) {
1433         case XEON_PPD_TOPO_B2B_USD:
1434                 return NTB_TOPO_B2B_USD;
1435
1436         case XEON_PPD_TOPO_B2B_DSD:
1437                 return NTB_TOPO_B2B_DSD;
1438
1439         case XEON_PPD_TOPO_PRI_USD:
1440         case XEON_PPD_TOPO_PRI_DSD: /* accept bogus PRI_DSD */
1441                 return NTB_TOPO_PRI;
1442
1443         case XEON_PPD_TOPO_SEC_USD:
1444         case XEON_PPD_TOPO_SEC_DSD: /* accept bogus SEC_DSD */
1445                 return NTB_TOPO_SEC;
1446         }
1447
1448         return NTB_TOPO_NONE;
1449 }
1450
1451 static inline int xeon_ppd_bar4_split(struct intel_ntb_dev *ndev, u8 ppd)
1452 {
1453         if (ppd & XEON_PPD_SPLIT_BAR_MASK) {
1454                 dev_dbg(ndev_dev(ndev), "PPD %d split bar\n", ppd);
1455                 return 1;
1456         }
1457         return 0;
1458 }
1459
1460 static int xeon_init_isr(struct intel_ntb_dev *ndev)
1461 {
1462         return ndev_init_isr(ndev, XEON_DB_MSIX_VECTOR_COUNT,
1463                              XEON_DB_MSIX_VECTOR_COUNT,
1464                              XEON_DB_MSIX_VECTOR_SHIFT,
1465                              XEON_DB_TOTAL_SHIFT);
1466 }
1467
1468 static void xeon_deinit_isr(struct intel_ntb_dev *ndev)
1469 {
1470         ndev_deinit_isr(ndev);
1471 }
1472
1473 static int xeon_setup_b2b_mw(struct intel_ntb_dev *ndev,
1474                              const struct intel_b2b_addr *addr,
1475                              const struct intel_b2b_addr *peer_addr)
1476 {
1477         struct pci_dev *pdev;
1478         void __iomem *mmio;
1479         resource_size_t bar_size;
1480         phys_addr_t bar_addr;
1481         int b2b_bar;
1482         u8 bar_sz;
1483
1484         pdev = ndev_pdev(ndev);
1485         mmio = ndev->self_mmio;
1486
1487         if (ndev->b2b_idx >= ndev->mw_count) {
1488                 dev_dbg(ndev_dev(ndev), "not using b2b mw\n");
1489                 b2b_bar = 0;
1490                 ndev->b2b_off = 0;
1491         } else {
1492                 b2b_bar = ndev_mw_to_bar(ndev, ndev->b2b_idx);
1493                 if (b2b_bar < 0)
1494                         return -EIO;
1495
1496                 dev_dbg(ndev_dev(ndev), "using b2b mw bar %d\n", b2b_bar);
1497
1498                 bar_size = pci_resource_len(ndev->ntb.pdev, b2b_bar);
1499
1500                 dev_dbg(ndev_dev(ndev), "b2b bar size %#llx\n", bar_size);
1501
1502                 if (b2b_mw_share && XEON_B2B_MIN_SIZE <= (bar_size >> 1)) {
1503                         dev_dbg(ndev_dev(ndev),
1504                                 "b2b using first half of bar\n");
1505                         ndev->b2b_off = bar_size >> 1;
1506                 } else if (XEON_B2B_MIN_SIZE <= bar_size) {
1507                         dev_dbg(ndev_dev(ndev),
1508                                 "b2b using whole bar\n");
1509                         ndev->b2b_off = 0;
1510                         --ndev->mw_count;
1511                 } else {
1512                         dev_dbg(ndev_dev(ndev),
1513                                 "b2b bar size is too small\n");
1514                         return -EIO;
1515                 }
1516         }
1517
1518         /* Reset the secondary bar sizes to match the primary bar sizes,
1519          * except disable or halve the size of the b2b secondary bar.
1520          *
1521          * Note: each bar size register gets its own code, because the register
1522          * offsets are not in a consistent order (bar5sz oddly comes after ppd).
1523          */
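        /* The *BARxxSZ registers hold the BAR size as a power-of-two
         * exponent, so decrementing the value halves the BAR and writing
         * zero disables it entirely.
         */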
1524         pci_read_config_byte(pdev, XEON_PBAR23SZ_OFFSET, &bar_sz);
1525         dev_dbg(ndev_dev(ndev), "PBAR23SZ %#x\n", bar_sz);
1526         if (b2b_bar == 2) {
1527                 if (ndev->b2b_off)
1528                         bar_sz -= 1;
1529                 else
1530                         bar_sz = 0;
1531         }
1532         pci_write_config_byte(pdev, XEON_SBAR23SZ_OFFSET, bar_sz);
1533         pci_read_config_byte(pdev, XEON_SBAR23SZ_OFFSET, &bar_sz);
1534         dev_dbg(ndev_dev(ndev), "SBAR23SZ %#x\n", bar_sz);
1535
1536         if (!ndev->bar4_split) {
1537                 pci_read_config_byte(pdev, XEON_PBAR45SZ_OFFSET, &bar_sz);
1538                 dev_dbg(ndev_dev(ndev), "PBAR45SZ %#x\n", bar_sz);
1539                 if (b2b_bar == 4) {
1540                         if (ndev->b2b_off)
1541                                 bar_sz -= 1;
1542                         else
1543                                 bar_sz = 0;
1544                 }
1545                 pci_write_config_byte(pdev, XEON_SBAR45SZ_OFFSET, bar_sz);
1546                 pci_read_config_byte(pdev, XEON_SBAR45SZ_OFFSET, &bar_sz);
1547                 dev_dbg(ndev_dev(ndev), "SBAR45SZ %#x\n", bar_sz);
1548         } else {
1549                 pci_read_config_byte(pdev, XEON_PBAR4SZ_OFFSET, &bar_sz);
1550                 dev_dbg(ndev_dev(ndev), "PBAR4SZ %#x\n", bar_sz);
1551                 if (b2b_bar == 4) {
1552                         if (ndev->b2b_off)
1553                                 bar_sz -= 1;
1554                         else
1555                                 bar_sz = 0;
1556                 }
1557                 pci_write_config_byte(pdev, XEON_SBAR4SZ_OFFSET, bar_sz);
1558                 pci_read_config_byte(pdev, XEON_SBAR4SZ_OFFSET, &bar_sz);
1559                 dev_dbg(ndev_dev(ndev), "SBAR4SZ %#x\n", bar_sz);
1560
1561                 pci_read_config_byte(pdev, XEON_PBAR5SZ_OFFSET, &bar_sz);
1562                 dev_dbg(ndev_dev(ndev), "PBAR5SZ %#x\n", bar_sz);
1563                 if (b2b_bar == 5) {
1564                         if (ndev->b2b_off)
1565                                 bar_sz -= 1;
1566                         else
1567                                 bar_sz = 0;
1568                 }
1569                 pci_write_config_byte(pdev, XEON_SBAR5SZ_OFFSET, bar_sz);
1570                 pci_read_config_byte(pdev, XEON_SBAR5SZ_OFFSET, &bar_sz);
1571                 dev_dbg(ndev_dev(ndev), "SBAR5SZ %#x\n", bar_sz);
1572         }
1573
1574         /* SBAR01 hit by first part of the b2b bar */
1575         if (b2b_bar == 0)
1576                 bar_addr = addr->bar0_addr;
1577         else if (b2b_bar == 2)
1578                 bar_addr = addr->bar2_addr64;
1579         else if (b2b_bar == 4 && !ndev->bar4_split)
1580                 bar_addr = addr->bar4_addr64;
1581         else if (b2b_bar == 4)
1582                 bar_addr = addr->bar4_addr32;
1583         else if (b2b_bar == 5)
1584                 bar_addr = addr->bar5_addr32;
1585         else
1586                 return -EIO;
1587
1588         dev_dbg(ndev_dev(ndev), "SBAR01 %#018llx\n", bar_addr);
1589         iowrite64(bar_addr, mmio + XEON_SBAR0BASE_OFFSET);
1590
1591         /* Other SBARs are normally hit by the PBAR xlat, except the b2b bar.
1592          * The b2b bar is either disabled above, or configured half-size, and
1593          * it starts at the PBAR xlat + offset.
1594          */
1595
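        /* Each base/limit is read back after the write so the dev_dbg lines
         * report the value the hardware actually latched.
         */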
1596         bar_addr = addr->bar2_addr64 + (b2b_bar == 2 ? ndev->b2b_off : 0);
1597         iowrite64(bar_addr, mmio + XEON_SBAR23BASE_OFFSET);
1598         bar_addr = ioread64(mmio + XEON_SBAR23BASE_OFFSET);
1599         dev_dbg(ndev_dev(ndev), "SBAR23 %#018llx\n", bar_addr);
1600
1601         if (!ndev->bar4_split) {
1602                 bar_addr = addr->bar4_addr64 +
1603                         (b2b_bar == 4 ? ndev->b2b_off : 0);
1604                 iowrite64(bar_addr, mmio + XEON_SBAR45BASE_OFFSET);
1605                 bar_addr = ioread64(mmio + XEON_SBAR45BASE_OFFSET);
1606                 dev_dbg(ndev_dev(ndev), "SBAR45 %#018llx\n", bar_addr);
1607         } else {
1608                 bar_addr = addr->bar4_addr32 +
1609                         (b2b_bar == 4 ? ndev->b2b_off : 0);
1610                 iowrite32(bar_addr, mmio + XEON_SBAR4BASE_OFFSET);
1611                 bar_addr = ioread32(mmio + XEON_SBAR4BASE_OFFSET);
1612                 dev_dbg(ndev_dev(ndev), "SBAR4 %#010llx\n", bar_addr);
1613
1614                 bar_addr = addr->bar5_addr32 +
1615                         (b2b_bar == 5 ? ndev->b2b_off : 0);
1616                 iowrite32(bar_addr, mmio + XEON_SBAR5BASE_OFFSET);
1617                 bar_addr = ioread32(mmio + XEON_SBAR5BASE_OFFSET);
1618                 dev_dbg(ndev_dev(ndev), "SBAR5 %#010llx\n", bar_addr);
1619         }
1620
1621         /* set up incoming bar limits == base addrs (zero-length windows) */
1622
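        /* With limit == base the window has zero length, so incoming
         * requests are rejected until intel_ntb_mw_set_trans() widens the
         * limit and programs a real translation.
         */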
1623         bar_addr = addr->bar2_addr64 + (b2b_bar == 2 ? ndev->b2b_off : 0);
1624         iowrite64(bar_addr, mmio + XEON_SBAR23LMT_OFFSET);
1625         bar_addr = ioread64(mmio + XEON_SBAR23LMT_OFFSET);
1626         dev_dbg(ndev_dev(ndev), "SBAR23LMT %#018llx\n", bar_addr);
1627
1628         if (!ndev->bar4_split) {
1629                 bar_addr = addr->bar4_addr64 +
1630                         (b2b_bar == 4 ? ndev->b2b_off : 0);
1631                 iowrite64(bar_addr, mmio + XEON_SBAR45LMT_OFFSET);
1632                 bar_addr = ioread64(mmio + XEON_SBAR45LMT_OFFSET);
1633                 dev_dbg(ndev_dev(ndev), "SBAR45LMT %#018llx\n", bar_addr);
1634         } else {
1635                 bar_addr = addr->bar4_addr32 +
1636                         (b2b_bar == 4 ? ndev->b2b_off : 0);
1637                 iowrite32(bar_addr, mmio + XEON_SBAR4LMT_OFFSET);
1638                 bar_addr = ioread32(mmio + XEON_SBAR4LMT_OFFSET);
1639                 dev_dbg(ndev_dev(ndev), "SBAR4LMT %#010llx\n", bar_addr);
1640
1641                 bar_addr = addr->bar5_addr32 +
1642                         (b2b_bar == 5 ? ndev->b2b_off : 0);
1643                 iowrite32(bar_addr, mmio + XEON_SBAR5LMT_OFFSET);
1644                 bar_addr = ioread32(mmio + XEON_SBAR5LMT_OFFSET);
1645                 dev_dbg(ndev_dev(ndev), "SBAR5LMT %#010llx\n", bar_addr);
1646         }
1647
1648         /* zero incoming translation addrs */
1649         iowrite64(0, mmio + XEON_SBAR23XLAT_OFFSET);
1650
1651         if (!ndev->bar4_split) {
1652                 iowrite64(0, mmio + XEON_SBAR45XLAT_OFFSET);
1653         } else {
1654                 iowrite32(0, mmio + XEON_SBAR4XLAT_OFFSET);
1655                 iowrite32(0, mmio + XEON_SBAR5XLAT_OFFSET);
1656         }
1657
1658         /* zero outgoing translation limits (whole bar size windows) */
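        /* A limit of zero is treated as "unlimited", so the outgoing
         * window spans the whole BAR.
         */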
1659         iowrite64(0, mmio + XEON_PBAR23LMT_OFFSET);
1660         if (!ndev->bar4_split) {
1661                 iowrite64(0, mmio + XEON_PBAR45LMT_OFFSET);
1662         } else {
1663                 iowrite32(0, mmio + XEON_PBAR4LMT_OFFSET);
1664                 iowrite32(0, mmio + XEON_PBAR5LMT_OFFSET);
1665         }
1666
1667         /* set outgoing translation offsets */
1668         bar_addr = peer_addr->bar2_addr64;
1669         iowrite64(bar_addr, mmio + XEON_PBAR23XLAT_OFFSET);
1670         bar_addr = ioread64(mmio + XEON_PBAR23XLAT_OFFSET);
1671         dev_dbg(ndev_dev(ndev), "PBAR23XLAT %#018llx\n", bar_addr);
1672
1673         if (!ndev->bar4_split) {
1674                 bar_addr = peer_addr->bar4_addr64;
1675                 iowrite64(bar_addr, mmio + XEON_PBAR45XLAT_OFFSET);
1676                 bar_addr = ioread64(mmio + XEON_PBAR45XLAT_OFFSET);
1677                 dev_dbg(ndev_dev(ndev), "PBAR45XLAT %#018llx\n", bar_addr);
1678         } else {
1679                 bar_addr = peer_addr->bar4_addr32;
1680                 iowrite32(bar_addr, mmio + XEON_PBAR4XLAT_OFFSET);
1681                 bar_addr = ioread32(mmio + XEON_PBAR4XLAT_OFFSET);
1682                 dev_dbg(ndev_dev(ndev), "PBAR4XLAT %#010llx\n", bar_addr);
1683
1684                 bar_addr = peer_addr->bar5_addr32;
1685                 iowrite32(bar_addr, mmio + XEON_PBAR5XLAT_OFFSET);
1686                 bar_addr = ioread32(mmio + XEON_PBAR5XLAT_OFFSET);
1687                 dev_dbg(ndev_dev(ndev), "PBAR5XLAT %#010llx\n", bar_addr);
1688         }
1689
1690         /* set the translation offset for b2b registers */
1691         if (b2b_bar == 0)
1692                 bar_addr = peer_addr->bar0_addr;
1693         else if (b2b_bar == 2)
1694                 bar_addr = peer_addr->bar2_addr64;
1695         else if (b2b_bar == 4 && !ndev->bar4_split)
1696                 bar_addr = peer_addr->bar4_addr64;
1697         else if (b2b_bar == 4)
1698                 bar_addr = peer_addr->bar4_addr32;
1699         else if (b2b_bar == 5)
1700                 bar_addr = peer_addr->bar5_addr32;
1701         else
1702                 return -EIO;
1703
1704         /* B2B_XLAT_OFFSET is 64bit, but can only take 32bit writes */
1705         dev_dbg(ndev_dev(ndev), "B2BXLAT %#018llx\n", bar_addr);
1706         iowrite32(bar_addr, mmio + XEON_B2B_XLAT_OFFSETL);
1707         iowrite32(bar_addr >> 32, mmio + XEON_B2B_XLAT_OFFSETU);
1708
1709         if (b2b_bar) {
1710                 /* map peer ntb mmio config space registers */
1711                 ndev->peer_mmio = pci_iomap(pdev, b2b_bar,
1712                                             XEON_B2B_MIN_SIZE);
1713                 if (!ndev->peer_mmio)
1714                         return -EIO;
1715         }
1716
1717         return 0;
1718 }
1719
1720 static int xeon_init_ntb(struct intel_ntb_dev *ndev)
1721 {
1722         int rc;
1723         u32 ntb_ctl;
1724
1725         if (ndev->bar4_split)
1726                 ndev->mw_count = HSX_SPLIT_BAR_MW_COUNT;
1727         else
1728                 ndev->mw_count = XEON_MW_COUNT;
1729
1730         ndev->spad_count = XEON_SPAD_COUNT;
1731         ndev->db_count = XEON_DB_COUNT;
1732         ndev->db_link_mask = XEON_DB_LINK_BIT;
1733
1734         switch (ndev->ntb.topo) {
1735         case NTB_TOPO_PRI:
1736                 if (ndev->hwerr_flags & NTB_HWERR_SDOORBELL_LOCKUP) {
1737                         dev_err(ndev_dev(ndev), "NTB Primary config disabled\n");
1738                         return -EINVAL;
1739                 }
1740
1741                 /* enable link to allow secondary side device to appear */
1742                 ntb_ctl = ioread32(ndev->self_mmio + ndev->reg->ntb_ctl);
1743                 ntb_ctl &= ~NTB_CTL_DISABLE;
1744                 iowrite32(ntb_ctl, ndev->self_mmio + ndev->reg->ntb_ctl);
1745
1746                 /* use half the spads for the peer */
1747                 ndev->spad_count >>= 1;
1748                 ndev->self_reg = &xeon_pri_reg;
1749                 ndev->peer_reg = &xeon_sec_reg;
1750                 ndev->xlat_reg = &xeon_sec_xlat;
1751                 break;
1752
1753         case NTB_TOPO_SEC:
1754                 if (ndev->hwerr_flags & NTB_HWERR_SDOORBELL_LOCKUP) {
1755                         dev_err(ndev_dev(ndev), "NTB Secondary config disabled\n");
1756                         return -EINVAL;
1757                 }
1758                 /* use half the spads for the peer */
1759                 ndev->spad_count >>= 1;
1760                 ndev->self_reg = &xeon_sec_reg;
1761                 ndev->peer_reg = &xeon_pri_reg;
1762                 ndev->xlat_reg = &xeon_pri_xlat;
1763                 break;
1764
1765         case NTB_TOPO_B2B_USD:
1766         case NTB_TOPO_B2B_DSD:
1767                 ndev->self_reg = &xeon_pri_reg;
1768                 ndev->peer_reg = &xeon_b2b_reg;
1769                 ndev->xlat_reg = &xeon_sec_xlat;
1770
1771                 if (ndev->hwerr_flags & NTB_HWERR_SDOORBELL_LOCKUP) {
1772                         ndev->peer_reg = &xeon_pri_reg;
1773
1774                         if (b2b_mw_idx < 0)
1775                                 ndev->b2b_idx = b2b_mw_idx + ndev->mw_count;
1776                         else
1777                                 ndev->b2b_idx = b2b_mw_idx;
1778
1779                         dev_dbg(ndev_dev(ndev),
1780                                 "setting up b2b mw idx %d (resolved to %d)\n",
1781                                 b2b_mw_idx, ndev->b2b_idx);
1782
1783                 } else if (ndev->hwerr_flags & NTB_HWERR_B2BDOORBELL_BIT14) {
1784                         dev_warn(ndev_dev(ndev), "Reduce doorbell count by 1\n");
1785                         ndev->db_count -= 1;
1786                 }
1787
1788                 if (ndev->ntb.topo == NTB_TOPO_B2B_USD) {
1789                         rc = xeon_setup_b2b_mw(ndev,
1790                                                &xeon_b2b_dsd_addr,
1791                                                &xeon_b2b_usd_addr);
1792                 } else {
1793                         rc = xeon_setup_b2b_mw(ndev,
1794                                                &xeon_b2b_usd_addr,
1795                                                &xeon_b2b_dsd_addr);
1796                 }
1797                 if (rc)
1798                         return rc;
1799
1800                 /* Enable Bus Master and Memory Space on the secondary side */
1801                 iowrite16(PCI_COMMAND_MEMORY | PCI_COMMAND_MASTER,
1802                           ndev->self_mmio + XEON_SPCICMD_OFFSET);
1803
1804                 break;
1805
1806         default:
1807                 return -EINVAL;
1808         }
1809
1810         ndev->db_valid_mask = BIT_ULL(ndev->db_count) - 1;
1811
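        /* Mask (disable) every valid doorbell until a client enables the
         * link; e.g. db_count == 15 yields a valid mask of 0x7fff.
         */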
1812         ndev->reg->db_iowrite(ndev->db_valid_mask,
1813                               ndev->self_mmio +
1814                               ndev->self_reg->db_mask);
1815
1816         return 0;
1817 }
1818
1819 static int xeon_init_dev(struct intel_ntb_dev *ndev)
1820 {
1821         struct pci_dev *pdev;
1822         u8 ppd;
1823         int rc, mem;
1824
1825         pdev = ndev_pdev(ndev);
1826
1827         switch (pdev->device) {
1828         /* There is a Xeon hardware erratum related to writes to SDOORBELL or
1829          * B2BDOORBELL in conjunction with inbound access to NTB MMIO space,
1830          * which may hang the system.  To work around this, use the second memory
1831          * window to access the interrupt and scratch pad registers on the
1832          * remote system.
1833          */
1834         case PCI_DEVICE_ID_INTEL_NTB_SS_JSF:
1835         case PCI_DEVICE_ID_INTEL_NTB_PS_JSF:
1836         case PCI_DEVICE_ID_INTEL_NTB_B2B_JSF:
1837         case PCI_DEVICE_ID_INTEL_NTB_SS_SNB:
1838         case PCI_DEVICE_ID_INTEL_NTB_PS_SNB:
1839         case PCI_DEVICE_ID_INTEL_NTB_B2B_SNB:
1840         case PCI_DEVICE_ID_INTEL_NTB_SS_IVT:
1841         case PCI_DEVICE_ID_INTEL_NTB_PS_IVT:
1842         case PCI_DEVICE_ID_INTEL_NTB_B2B_IVT:
1843         case PCI_DEVICE_ID_INTEL_NTB_SS_HSX:
1844         case PCI_DEVICE_ID_INTEL_NTB_PS_HSX:
1845         case PCI_DEVICE_ID_INTEL_NTB_B2B_HSX:
1846                 ndev->hwerr_flags |= NTB_HWERR_SDOORBELL_LOCKUP;
1847                 break;
1848         }
1849
1850         switch (pdev->device) {
1851         /* There is a hardware erratum related to accessing any register in
1852          * SB01BASE in the presence of bidirectional traffic crossing the NTB.
1853          */
1854         case PCI_DEVICE_ID_INTEL_NTB_SS_IVT:
1855         case PCI_DEVICE_ID_INTEL_NTB_PS_IVT:
1856         case PCI_DEVICE_ID_INTEL_NTB_B2B_IVT:
1857         case PCI_DEVICE_ID_INTEL_NTB_SS_HSX:
1858         case PCI_DEVICE_ID_INTEL_NTB_PS_HSX:
1859         case PCI_DEVICE_ID_INTEL_NTB_B2B_HSX:
1860                 ndev->hwerr_flags |= NTB_HWERR_SB01BASE_LOCKUP;
1861                 break;
1862         }
1863
1864         switch (pdev->device) {
1865         /* HW erratum on bit 14 of the b2bdoorbell register.  Writes will not be
1866          * mirrored to the remote system.  Shrink the number of bits by one,
1867          * since bit 14 is the last bit.
1868          */
1869         case PCI_DEVICE_ID_INTEL_NTB_SS_JSF:
1870         case PCI_DEVICE_ID_INTEL_NTB_PS_JSF:
1871         case PCI_DEVICE_ID_INTEL_NTB_B2B_JSF:
1872         case PCI_DEVICE_ID_INTEL_NTB_SS_SNB:
1873         case PCI_DEVICE_ID_INTEL_NTB_PS_SNB:
1874         case PCI_DEVICE_ID_INTEL_NTB_B2B_SNB:
1875         case PCI_DEVICE_ID_INTEL_NTB_SS_IVT:
1876         case PCI_DEVICE_ID_INTEL_NTB_PS_IVT:
1877         case PCI_DEVICE_ID_INTEL_NTB_B2B_IVT:
1878         case PCI_DEVICE_ID_INTEL_NTB_SS_HSX:
1879         case PCI_DEVICE_ID_INTEL_NTB_PS_HSX:
1880         case PCI_DEVICE_ID_INTEL_NTB_B2B_HSX:
1881                 ndev->hwerr_flags |= NTB_HWERR_B2BDOORBELL_BIT14;
1882                 break;
1883         }
1884
1885         ndev->reg = &xeon_reg;
1886
1887         rc = pci_read_config_byte(pdev, XEON_PPD_OFFSET, &ppd);
1888         if (rc)
1889                 return -EIO;
1890
1891         ndev->ntb.topo = xeon_ppd_topo(ndev, ppd);
1892         dev_dbg(ndev_dev(ndev), "ppd %#x topo %s\n", ppd,
1893                 ntb_topo_string(ndev->ntb.topo));
1894         if (ndev->ntb.topo == NTB_TOPO_NONE)
1895                 return -EINVAL;
1896
1897         if (ndev->ntb.topo != NTB_TOPO_SEC) {
1898                 ndev->bar4_split = xeon_ppd_bar4_split(ndev, ppd);
1899                 dev_dbg(ndev_dev(ndev), "ppd %#x bar4_split %d\n",
1900                         ppd, ndev->bar4_split);
1901         } else {
1902                 /* This is how the transparent side figures out whether we are
1903                  * doing split BAR or not: the hw on the transparent side has no
1904                  * way to know, and so cannot set the PPD.
1905                  */
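                /* With split BARs the device exposes four memory BARs
                 * (0, 2, 4 and 5) instead of three (0, 2 and 4), so simply
                 * counting the memory BARs reveals the configuration.
                 */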
1906                 mem = pci_select_bars(pdev, IORESOURCE_MEM);
1907                 ndev->bar4_split = hweight32(mem) ==
1908                         HSX_SPLIT_BAR_MW_COUNT + 1;
1909                 dev_dbg(ndev_dev(ndev), "mem %#x bar4_split %d\n",
1910                         mem, ndev->bar4_split);
1911         }
1912
1913         rc = xeon_init_ntb(ndev);
1914         if (rc)
1915                 return rc;
1916
1917         return xeon_init_isr(ndev);
1918 }
1919
1920 static void xeon_deinit_dev(struct intel_ntb_dev *ndev)
1921 {
1922         xeon_deinit_isr(ndev);
1923 }
1924
1925 static int intel_ntb_init_pci(struct intel_ntb_dev *ndev, struct pci_dev *pdev)
1926 {
1927         int rc;
1928
1929         pci_set_drvdata(pdev, ndev);
1930
1931         rc = pci_enable_device(pdev);
1932         if (rc)
1933                 goto err_pci_enable;
1934
1935         rc = pci_request_regions(pdev, NTB_NAME);
1936         if (rc)
1937                 goto err_pci_regions;
1938
1939         pci_set_master(pdev);
1940
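        /* Prefer 64-bit DMA masks; fall back to 32-bit addressing when the
         * platform cannot provide the full mask.
         */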
1941         rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
1942         if (rc) {
1943                 rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
1944                 if (rc)
1945                         goto err_dma_mask;
1946                 dev_warn(ndev_dev(ndev), "Cannot DMA highmem\n");
1947         }
1948
1949         rc = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
1950         if (rc) {
1951                 rc = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
1952                 if (rc)
1953                         goto err_dma_mask;
1954                 dev_warn(ndev_dev(ndev), "Cannot DMA consistent highmem\n");
1955         }
1956
1957         ndev->self_mmio = pci_iomap(pdev, 0, 0);
1958         if (!ndev->self_mmio) {
1959                 rc = -EIO;
1960                 goto err_mmio;
1961         }
1962         ndev->peer_mmio = ndev->self_mmio;
1963
1964         return 0;
1965
1966 err_mmio:
1967 err_dma_mask:
1968         pci_clear_master(pdev);
1969         pci_release_regions(pdev);
1970 err_pci_regions:
1971         pci_disable_device(pdev);
1972 err_pci_enable:
1973         pci_set_drvdata(pdev, NULL);
1974         return rc;
1975 }
1976
1977 static void intel_ntb_deinit_pci(struct intel_ntb_dev *ndev)
1978 {
1979         struct pci_dev *pdev = ndev_pdev(ndev);
1980
1981         if (ndev->peer_mmio && ndev->peer_mmio != ndev->self_mmio)
1982                 pci_iounmap(pdev, ndev->peer_mmio);
1983         pci_iounmap(pdev, ndev->self_mmio);
1984
1985         pci_clear_master(pdev);
1986         pci_release_regions(pdev);
1987         pci_disable_device(pdev);
1988         pci_set_drvdata(pdev, NULL);
1989 }
1990
1991 static inline void ndev_init_struct(struct intel_ntb_dev *ndev,
1992                                     struct pci_dev *pdev)
1993 {
1994         ndev->ntb.pdev = pdev;
1995         ndev->ntb.topo = NTB_TOPO_NONE;
1996         ndev->ntb.ops = &intel_ntb_ops;
1997
1998         ndev->b2b_off = 0;
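        /* INT_MAX means no memory window is claimed for b2b yet;
         * xeon_init_ntb() assigns a real index for B2B topologies.
         */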
1999         ndev->b2b_idx = INT_MAX;
2000
2001         ndev->bar4_split = 0;
2002
2003         ndev->mw_count = 0;
2004         ndev->spad_count = 0;
2005         ndev->db_count = 0;
2006         ndev->db_vec_count = 0;
2007         ndev->db_vec_shift = 0;
2008
2009         ndev->ntb_ctl = 0;
2010         ndev->lnk_sta = 0;
2011
2012         ndev->db_valid_mask = 0;
2013         ndev->db_link_mask = 0;
2014         ndev->db_mask = 0;
2015
2016         spin_lock_init(&ndev->db_mask_lock);
2017 }
2018
2019 static int intel_ntb_pci_probe(struct pci_dev *pdev,
2020                                const struct pci_device_id *id)
2021 {
2022         struct intel_ntb_dev *ndev;
2023         int rc, node;
2024
2025         node = dev_to_node(&pdev->dev);
2026
2027         if (pdev_is_atom(pdev)) {
2028                 ndev = kzalloc_node(sizeof(*ndev), GFP_KERNEL, node);
2029                 if (!ndev) {
2030                         rc = -ENOMEM;
2031                         goto err_ndev;
2032                 }
2033
2034                 ndev_init_struct(ndev, pdev);
2035
2036                 rc = intel_ntb_init_pci(ndev, pdev);
2037                 if (rc)
2038                         goto err_init_pci;
2039
2040                 rc = atom_init_dev(ndev);
2041                 if (rc)
2042                         goto err_init_dev;
2043
2044         } else if (pdev_is_xeon(pdev)) {
2045                 ndev = kzalloc_node(sizeof(*ndev), GFP_KERNEL, node);
2046                 if (!ndev) {
2047                         rc = -ENOMEM;
2048                         goto err_ndev;
2049                 }
2050
2051                 ndev_init_struct(ndev, pdev);
2052
2053                 rc = intel_ntb_init_pci(ndev, pdev);
2054                 if (rc)
2055                         goto err_init_pci;
2056
2057                 rc = xeon_init_dev(ndev);
2058                 if (rc)
2059                         goto err_init_dev;
2060
2061         } else {
2062                 rc = -EINVAL;
2063                 goto err_ndev;
2064         }
2065
2066         ndev_reset_unsafe_flags(ndev);
2067
2068         ndev->reg->poll_link(ndev);
2069
2070         ndev_init_debugfs(ndev);
2071
2072         rc = ntb_register_device(&ndev->ntb);
2073         if (rc)
2074                 goto err_register;
2075
2076         dev_info(&pdev->dev, "NTB device registered.\n");
2077
2078         return 0;
2079
2080 err_register:
2081         ndev_deinit_debugfs(ndev);
2082         if (pdev_is_atom(pdev))
2083                 atom_deinit_dev(ndev);
2084         else if (pdev_is_xeon(pdev))
2085                 xeon_deinit_dev(ndev);
2086 err_init_dev:
2087         intel_ntb_deinit_pci(ndev);
2088 err_init_pci:
2089         kfree(ndev);
2090 err_ndev:
2091         return rc;
2092 }
2093
2094 static void intel_ntb_pci_remove(struct pci_dev *pdev)
2095 {
2096         struct intel_ntb_dev *ndev = pci_get_drvdata(pdev);
2097
2098         ntb_unregister_device(&ndev->ntb);
2099         ndev_deinit_debugfs(ndev);
2100         if (pdev_is_atom(pdev))
2101                 atom_deinit_dev(ndev);
2102         else if (pdev_is_xeon(pdev))
2103                 xeon_deinit_dev(ndev);
2104         intel_ntb_deinit_pci(ndev);
2105         kfree(ndev);
2106 }
2107
2108 static const struct intel_ntb_reg atom_reg = {
2109         .poll_link              = atom_poll_link,
2110         .link_is_up             = atom_link_is_up,
2111         .db_ioread              = atom_db_ioread,
2112         .db_iowrite             = atom_db_iowrite,
2113         .db_size                = sizeof(u64),
2114         .ntb_ctl                = ATOM_NTBCNTL_OFFSET,
2115         .mw_bar                 = {2, 4},
2116 };
2117
2118 static const struct intel_ntb_alt_reg atom_pri_reg = {
2119         .db_bell                = ATOM_PDOORBELL_OFFSET,
2120         .db_mask                = ATOM_PDBMSK_OFFSET,
2121         .spad                   = ATOM_SPAD_OFFSET,
2122 };
2123
2124 static const struct intel_ntb_alt_reg atom_b2b_reg = {
2125         .db_bell                = ATOM_B2B_DOORBELL_OFFSET,
2126         .spad                   = ATOM_B2B_SPAD_OFFSET,
2127 };
2128
2129 static const struct intel_ntb_xlat_reg atom_sec_xlat = {
2130         /* FIXME : .bar0_base   = ATOM_SBAR0BASE_OFFSET, */
2131         /* FIXME : .bar2_limit  = ATOM_SBAR2LMT_OFFSET, */
2132         .bar2_xlat              = ATOM_SBAR2XLAT_OFFSET,
2133 };
2134
2135 static const struct intel_ntb_reg xeon_reg = {
2136         .poll_link              = xeon_poll_link,
2137         .link_is_up             = xeon_link_is_up,
2138         .db_ioread              = xeon_db_ioread,
2139         .db_iowrite             = xeon_db_iowrite,
2140         .db_size                = sizeof(u32),
2141         .ntb_ctl                = XEON_NTBCNTL_OFFSET,
2142         .mw_bar                 = {2, 4, 5},
2143 };
2144
2145 static const struct intel_ntb_alt_reg xeon_pri_reg = {
2146         .db_bell                = XEON_PDOORBELL_OFFSET,
2147         .db_mask                = XEON_PDBMSK_OFFSET,
2148         .spad                   = XEON_SPAD_OFFSET,
2149 };
2150
2151 static const struct intel_ntb_alt_reg xeon_sec_reg = {
2152         .db_bell                = XEON_SDOORBELL_OFFSET,
2153         .db_mask                = XEON_SDBMSK_OFFSET,
2154         /* second half of the scratchpads */
2155         .spad                   = XEON_SPAD_OFFSET + (XEON_SPAD_COUNT << 1),
2156 };
2157
2158 static const struct intel_ntb_alt_reg xeon_b2b_reg = {
2159         .db_bell                = XEON_B2B_DOORBELL_OFFSET,
2160         .spad                   = XEON_B2B_SPAD_OFFSET,
2161 };
2162
2163 static const struct intel_ntb_xlat_reg xeon_pri_xlat = {
2164         /* Note: no primary .bar0_base visible to the secondary side.
2165          *
2166          * The secondary side cannot get the base address stored in primary
2167          * bars.  The base address is necessary to set the limit register to
2168          * any value other than zero, or unlimited.
2169          *
2170          * WITHOUT THE BASE ADDRESS, THE SECONDARY SIDE CANNOT DISABLE the
2171          * window by setting the limit equal to base, nor can it limit the size
2172          * of the memory window by setting the limit to base + size.
2173          */
2174         .bar2_limit             = XEON_PBAR23LMT_OFFSET,
2175         .bar2_xlat              = XEON_PBAR23XLAT_OFFSET,
2176 };
2177
2178 static const struct intel_ntb_xlat_reg xeon_sec_xlat = {
2179         .bar0_base              = XEON_SBAR0BASE_OFFSET,
2180         .bar2_limit             = XEON_SBAR23LMT_OFFSET,
2181         .bar2_xlat              = XEON_SBAR23XLAT_OFFSET,
2182 };
2183
2184 static struct intel_b2b_addr xeon_b2b_usd_addr = {
2185         .bar2_addr64            = XEON_B2B_BAR2_USD_ADDR64,
2186         .bar4_addr64            = XEON_B2B_BAR4_USD_ADDR64,
2187         .bar4_addr32            = XEON_B2B_BAR4_USD_ADDR32,
2188         .bar5_addr32            = XEON_B2B_BAR5_USD_ADDR32,
2189 };
2190
2191 static struct intel_b2b_addr xeon_b2b_dsd_addr = {
2192         .bar2_addr64            = XEON_B2B_BAR2_DSD_ADDR64,
2193         .bar4_addr64            = XEON_B2B_BAR4_DSD_ADDR64,
2194         .bar4_addr32            = XEON_B2B_BAR4_DSD_ADDR32,
2195         .bar5_addr32            = XEON_B2B_BAR5_DSD_ADDR32,
2196 };
2197
2198 /* operations for primary side of local ntb */
2199 static const struct ntb_dev_ops intel_ntb_ops = {
2200         .mw_count               = intel_ntb_mw_count,
2201         .mw_get_range           = intel_ntb_mw_get_range,
2202         .mw_set_trans           = intel_ntb_mw_set_trans,
2203         .link_is_up             = intel_ntb_link_is_up,
2204         .link_enable            = intel_ntb_link_enable,
2205         .link_disable           = intel_ntb_link_disable,
2206         .db_is_unsafe           = intel_ntb_db_is_unsafe,
2207         .db_valid_mask          = intel_ntb_db_valid_mask,
2208         .db_vector_count        = intel_ntb_db_vector_count,
2209         .db_vector_mask         = intel_ntb_db_vector_mask,
2210         .db_read                = intel_ntb_db_read,
2211         .db_clear               = intel_ntb_db_clear,
2212         .db_set_mask            = intel_ntb_db_set_mask,
2213         .db_clear_mask          = intel_ntb_db_clear_mask,
2214         .peer_db_addr           = intel_ntb_peer_db_addr,
2215         .peer_db_set            = intel_ntb_peer_db_set,
2216         .spad_is_unsafe         = intel_ntb_spad_is_unsafe,
2217         .spad_count             = intel_ntb_spad_count,
2218         .spad_read              = intel_ntb_spad_read,
2219         .spad_write             = intel_ntb_spad_write,
2220         .peer_spad_addr         = intel_ntb_peer_spad_addr,
2221         .peer_spad_read         = intel_ntb_peer_spad_read,
2222         .peer_spad_write        = intel_ntb_peer_spad_write,
2223 };
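
/* Illustrative sketch, not part of this driver: a minimal NTB client built
 * on the include/linux/ntb.h wrappers that dispatch into the ops above.
 * Everything except the ntb_* API names is hypothetical.
 *
 *	static int example_probe(struct ntb_client *client, struct ntb_dev *ntb)
 *	{
 *		phys_addr_t base;
 *		resource_size_t size, align, align_size;
 *
 *		Discover memory window 0 and hand the peer a size hint:
 *		ntb_mw_get_range(ntb, 0, &base, &size, &align, &align_size);
 *		ntb_spad_write(ntb, 0, lower_32_bits(size));
 *
 *		Bring the link up and ring peer doorbell bit 0:
 *		ntb_link_enable(ntb, NTB_SPEED_AUTO, NTB_WIDTH_AUTO);
 *		ntb_peer_db_set(ntb, BIT_ULL(0));
 *		return 0;
 *	}
 */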
2224
2225 static const struct file_operations intel_ntb_debugfs_info = {
2226         .owner = THIS_MODULE,
2227         .open = simple_open,
2228         .read = ndev_debugfs_read,
2229 };
2230
2231 static const struct pci_device_id intel_ntb_pci_tbl[] = {
2232         {PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_NTB_B2B_BWD)},
2233         {PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_NTB_B2B_JSF)},
2234         {PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_NTB_B2B_SNB)},
2235         {PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_NTB_B2B_IVT)},
2236         {PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_NTB_B2B_HSX)},
2237         {PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_NTB_PS_JSF)},
2238         {PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_NTB_PS_SNB)},
2239         {PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_NTB_PS_IVT)},
2240         {PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_NTB_PS_HSX)},
2241         {PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_NTB_SS_JSF)},
2242         {PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_NTB_SS_SNB)},
2243         {PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_NTB_SS_IVT)},
2244         {PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_NTB_SS_HSX)},
2245         {0}
2246 };
2247 MODULE_DEVICE_TABLE(pci, intel_ntb_pci_tbl);
2248
2249 static struct pci_driver intel_ntb_pci_driver = {
2250         .name = KBUILD_MODNAME,
2251         .id_table = intel_ntb_pci_tbl,
2252         .probe = intel_ntb_pci_probe,
2253         .remove = intel_ntb_pci_remove,
2254 };
2255
2256 static int __init intel_ntb_pci_driver_init(void)
2257 {
2258         pr_info("%s %s\n", NTB_DESC, NTB_VER);
2259
2260         if (debugfs_initialized())
2261                 debugfs_dir = debugfs_create_dir(KBUILD_MODNAME, NULL);
2262
2263         return pci_register_driver(&intel_ntb_pci_driver);
2264 }
2265 module_init(intel_ntb_pci_driver_init);
2266
2267 static void __exit intel_ntb_pci_driver_exit(void)
2268 {
2269         pci_unregister_driver(&intel_ntb_pci_driver);
2270
2271         debugfs_remove_recursive(debugfs_dir);
2272 }
2273 module_exit(intel_ntb_pci_driver_exit);
2274