/*
 * Copyright 2014 IBM Corp.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#include <linux/spinlock.h>
#include <linux/sched.h>
#include <linux/sched/clock.h>
#include <linux/slab.h>
#include <linux/mutex.h>
#include <linux/mm.h>
#include <linux/uaccess.h>
#include <linux/delay.h>
#include <asm/synch.h>
#include <misc/cxl-base.h>

#include "cxl.h"
#include "trace.h"

static int afu_control(struct cxl_afu *afu, u64 command, u64 clear,
                       u64 result, u64 mask, bool enabled)
{
        u64 AFU_Cntl;
        unsigned long timeout = jiffies + (HZ * CXL_TIMEOUT);
        int rc = 0;

        spin_lock(&afu->afu_cntl_lock);
        pr_devel("AFU command starting: %llx\n", command);

        trace_cxl_afu_ctrl(afu, command);

        AFU_Cntl = cxl_p2n_read(afu, CXL_AFU_Cntl_An);
        cxl_p2n_write(afu, CXL_AFU_Cntl_An, (AFU_Cntl & ~clear) | command);

        AFU_Cntl = cxl_p2n_read(afu, CXL_AFU_Cntl_An);
        while ((AFU_Cntl & mask) != result) {
                if (time_after_eq(jiffies, timeout)) {
                        dev_warn(&afu->dev, "WARNING: AFU control timed out!\n");
                        rc = -EBUSY;
                        goto out;
                }

                if (!cxl_ops->link_ok(afu->adapter, afu)) {
                        afu->enabled = enabled;
                        rc = -EIO;
                        goto out;
                }

                pr_devel_ratelimited("AFU control... (0x%016llx)\n",
                                     AFU_Cntl | command);
                cpu_relax();
                AFU_Cntl = cxl_p2n_read(afu, CXL_AFU_Cntl_An);
        }

        if (AFU_Cntl & CXL_AFU_Cntl_An_RA) {
                /*
                 * Workaround for a bug in the XSL used in the Mellanox CX4
                 * that fails to clear the RA bit after an AFU reset,
                 * preventing subsequent AFU resets from working.
                 */
                cxl_p2n_write(afu, CXL_AFU_Cntl_An, AFU_Cntl & ~CXL_AFU_Cntl_An_RA);
        }

        pr_devel("AFU command complete: %llx\n", command);
        afu->enabled = enabled;
out:
        trace_cxl_afu_ctrl_done(afu, command, rc);
        spin_unlock(&afu->afu_cntl_lock);

        return rc;
}
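
/*
 * Note: afu_enable(), cxl_afu_disable() and native_afu_reset() below are
 * all thin wrappers around afu_control(): each sets or clears the relevant
 * enable/reset bits in CXL_AFU_Cntl_An and then relies on the
 * poll-until-((AFU_Cntl & mask) == result) loop above, bounded by
 * CXL_TIMEOUT, to wait for the hardware to report the requested state.
 */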

static int afu_enable(struct cxl_afu *afu)
{
        pr_devel("AFU enable request\n");

        return afu_control(afu, CXL_AFU_Cntl_An_E, 0,
                           CXL_AFU_Cntl_An_ES_Enabled,
                           CXL_AFU_Cntl_An_ES_MASK, true);
}

int cxl_afu_disable(struct cxl_afu *afu)
{
        pr_devel("AFU disable request\n");

        return afu_control(afu, 0, CXL_AFU_Cntl_An_E,
                           CXL_AFU_Cntl_An_ES_Disabled,
                           CXL_AFU_Cntl_An_ES_MASK, false);
}

/* This will disable as well as reset */
static int native_afu_reset(struct cxl_afu *afu)
{
        int rc;
        u64 serr;

        pr_devel("AFU reset request\n");

        rc = afu_control(afu, CXL_AFU_Cntl_An_RA, 0,
                         CXL_AFU_Cntl_An_RS_Complete | CXL_AFU_Cntl_An_ES_Disabled,
                         CXL_AFU_Cntl_An_RS_MASK | CXL_AFU_Cntl_An_ES_MASK,
                         false);

        /*
         * Re-enable any masked interrupts when the AFU is not
         * activated to avoid side effects after attaching a process
         * in dedicated mode.
         */
        if (afu->current_mode == 0) {
                serr = cxl_p1n_read(afu, CXL_PSL_SERR_An);
                serr &= ~CXL_PSL_SERR_An_IRQ_MASKS;
                cxl_p1n_write(afu, CXL_PSL_SERR_An, serr);
        }

        return rc;
}

static int native_afu_check_and_enable(struct cxl_afu *afu)
{
        if (!cxl_ops->link_ok(afu->adapter, afu)) {
                WARN(1, "Refusing to enable afu while link down!\n");
                return -EIO;
        }
        if (afu->enabled)
                return 0;
        return afu_enable(afu);
}

int cxl_psl_purge(struct cxl_afu *afu)
{
        u64 PSL_CNTL = cxl_p1n_read(afu, CXL_PSL_SCNTL_An);
        u64 AFU_Cntl = cxl_p2n_read(afu, CXL_AFU_Cntl_An);
        u64 dsisr, dar;
        u64 start, end;
        u64 trans_fault = 0x0ULL;
        unsigned long timeout = jiffies + (HZ * CXL_TIMEOUT);
        int rc = 0;

        trace_cxl_psl_ctrl(afu, CXL_PSL_SCNTL_An_Pc);

        pr_devel("PSL purge request\n");

        if (cxl_is_power8())
                trans_fault = CXL_PSL_DSISR_TRANS;
        if (cxl_is_power9())
                trans_fault = CXL_PSL9_DSISR_An_TF;

        if (!cxl_ops->link_ok(afu->adapter, afu)) {
                dev_warn(&afu->dev, "PSL Purge called with link down, ignoring\n");
                rc = -EIO;
                goto out;
        }

        if ((AFU_Cntl & CXL_AFU_Cntl_An_ES_MASK) != CXL_AFU_Cntl_An_ES_Disabled) {
                WARN(1, "psl_purge request while AFU not disabled!\n");
                cxl_afu_disable(afu);
        }

        cxl_p1n_write(afu, CXL_PSL_SCNTL_An,
                      PSL_CNTL | CXL_PSL_SCNTL_An_Pc);
        start = local_clock();
        PSL_CNTL = cxl_p1n_read(afu, CXL_PSL_SCNTL_An);
        while ((PSL_CNTL & CXL_PSL_SCNTL_An_Ps_MASK)
                        == CXL_PSL_SCNTL_An_Ps_Pending) {
                if (time_after_eq(jiffies, timeout)) {
                        dev_warn(&afu->dev, "WARNING: PSL Purge timed out!\n");
                        rc = -EBUSY;
                        goto out;
                }
                if (!cxl_ops->link_ok(afu->adapter, afu)) {
                        rc = -EIO;
                        goto out;
                }

                dsisr = cxl_p2n_read(afu, CXL_PSL_DSISR_An);
                pr_devel_ratelimited("PSL purging... PSL_CNTL: 0x%016llx  PSL_DSISR: 0x%016llx\n",
                                     PSL_CNTL, dsisr);

                if (dsisr & trans_fault) {
                        dar = cxl_p2n_read(afu, CXL_PSL_DAR_An);
                        dev_notice(&afu->dev, "PSL purge terminating pending translation, DSISR: 0x%016llx, DAR: 0x%016llx\n",
                                   dsisr, dar);
                        cxl_p2n_write(afu, CXL_PSL_TFC_An, CXL_PSL_TFC_An_AE);
                } else if (dsisr) {
                        dev_notice(&afu->dev, "PSL purge acknowledging pending non-translation fault, DSISR: 0x%016llx\n",
                                   dsisr);
                        cxl_p2n_write(afu, CXL_PSL_TFC_An, CXL_PSL_TFC_An_A);
                } else {
                        cpu_relax();
                }
                PSL_CNTL = cxl_p1n_read(afu, CXL_PSL_SCNTL_An);
        }
        end = local_clock();
        pr_devel("PSL purged in %lld ns\n", end - start);

        cxl_p1n_write(afu, CXL_PSL_SCNTL_An,
                      PSL_CNTL & ~CXL_PSL_SCNTL_An_Pc);
out:
        trace_cxl_psl_ctrl_done(afu, CXL_PSL_SCNTL_An_Pc, rc);
        return rc;
}

static int spa_max_procs(int spa_size)
{
        /*
         * From the CAIA:
         *    end_of_SPA_area = SPA_Base + ((n+4) * 128) + (( ((n*8) + 127) >> 7) * 128) + 255
         * Most of that junk is really just an overly-complicated way of saying
         * the last 256 bytes are __aligned(128), so it's really:
         *    end_of_SPA_area = end_of_PSL_queue_area + __aligned(128) 255
         * and
         *    end_of_PSL_queue_area = SPA_Base + ((n+4) * 128) + (n*8) - 1
         * so
         *    sizeof(SPA) = ((n+4) * 128) + (n*8) + __aligned(128) 256
         * Ignore the alignment (which is safe in this case as long as we are
         * careful with our rounding) and solve for n:
         */
        return ((spa_size / 8) - 96) / 17;
}
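
/*
 * Worked example (illustrative only): multiplying the expression above out
 * gives sizeof(SPA) = 128(n+4) + 8n + 256 = 136n + 768, and solving
 * 136n + 768 <= spa_size for n yields the formula in spa_max_procs(). So
 * for a single 4K page, spa_max_procs(4096) = ((4096 / 8) - 96) / 17
 * = (512 - 96) / 17 = 24 process elements.
 */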

static int cxl_alloc_spa(struct cxl_afu *afu, int mode)
{
        unsigned spa_size;

        /* Work out how many pages to allocate */
        afu->native->spa_order = -1;
        do {
                afu->native->spa_order++;
                spa_size = (1 << afu->native->spa_order) * PAGE_SIZE;

                if (spa_size > 0x100000) {
                        dev_warn(&afu->dev, "num_of_processes too large for the SPA, limiting to %i (0x%x)\n",
                                        afu->native->spa_max_procs, afu->native->spa_size);
                        if (mode != CXL_MODE_DEDICATED)
                                afu->num_procs = afu->native->spa_max_procs;
                        break;
                }

                afu->native->spa_size = spa_size;
                afu->native->spa_max_procs = spa_max_procs(afu->native->spa_size);
        } while (afu->native->spa_max_procs < afu->num_procs);

        if (!(afu->native->spa = (struct cxl_process_element *)
              __get_free_pages(GFP_KERNEL | __GFP_ZERO, afu->native->spa_order))) {
                pr_err("cxl_alloc_spa: Unable to allocate scheduled process area\n");
                return -ENOMEM;
        }
        pr_devel("spa pages: %i afu->spa_max_procs: %i   afu->num_procs: %i\n",
                 1<<afu->native->spa_order, afu->native->spa_max_procs, afu->num_procs);

        return 0;
}

static void attach_spa(struct cxl_afu *afu)
{
        u64 spap;

        afu->native->sw_command_status = (__be64 *)((char *)afu->native->spa +
                                            ((afu->native->spa_max_procs + 3) * 128));

        spap = virt_to_phys(afu->native->spa) & CXL_PSL_SPAP_Addr;
        spap |= ((afu->native->spa_size >> (12 - CXL_PSL_SPAP_Size_Shift)) - 1) & CXL_PSL_SPAP_Size;
        spap |= CXL_PSL_SPAP_V;
        pr_devel("cxl: SPA allocated at 0x%p. Max processes: %i, sw_command_status: 0x%p CXL_PSL_SPAP_An=0x%016llx\n",
                afu->native->spa, afu->native->spa_max_procs,
                afu->native->sw_command_status, spap);
        cxl_p1n_write(afu, CXL_PSL_SPAP_An, spap);
}
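
/*
 * The SPAP_An value written above packs three fields: the physical base
 * address of the SPA (masked by CXL_PSL_SPAP_Addr), an encoded area size
 * (roughly the number of 4K pages, scaled into the CXL_PSL_SPAP_Size
 * field and biased by -1), and the valid bit (CXL_PSL_SPAP_V).
 * sw_command_status points at a fixed offset within the SPA,
 * (spa_max_procs + 3) * 128 bytes in, which is where the PSL reports
 * link-list command completion (see do_process_element_cmd() below).
 */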

static inline void detach_spa(struct cxl_afu *afu)
{
        cxl_p1n_write(afu, CXL_PSL_SPAP_An, 0);
}

void cxl_release_spa(struct cxl_afu *afu)
{
        if (afu->native->spa) {
                free_pages((unsigned long) afu->native->spa,
                        afu->native->spa_order);
                afu->native->spa = NULL;
        }
}

/*
 * Invalidation of all ERAT entries is no longer required by CAIA2. Use
 * only for debug.
 */
int cxl_invalidate_all_psl9(struct cxl *adapter)
{
        unsigned long timeout = jiffies + (HZ * CXL_TIMEOUT);
        u64 ierat;

        pr_devel("CXL adapter - invalidation of all ERAT entries\n");

        /* Invalidates all ERAT entries for Radix or HPT */
        ierat = CXL_XSL9_IERAT_IALL;
        if (radix_enabled())
                ierat |= CXL_XSL9_IERAT_INVR;
        cxl_p1_write(adapter, CXL_XSL9_IERAT, ierat);

        while (cxl_p1_read(adapter, CXL_XSL9_IERAT) & CXL_XSL9_IERAT_IINPROG) {
                if (time_after_eq(jiffies, timeout)) {
                        dev_warn(&adapter->dev,
                        "WARNING: CXL adapter invalidation of all ERAT entries timed out!\n");
                        return -EBUSY;
                }
                if (!cxl_ops->link_ok(adapter, NULL))
                        return -EIO;
                cpu_relax();
        }
        return 0;
}

int cxl_invalidate_all_psl8(struct cxl *adapter)
{
        unsigned long timeout = jiffies + (HZ * CXL_TIMEOUT);

        pr_devel("CXL adapter wide TLBIA & SLBIA\n");

        cxl_p1_write(adapter, CXL_PSL_AFUSEL, CXL_PSL_AFUSEL_A);

        cxl_p1_write(adapter, CXL_PSL_TLBIA, CXL_TLB_SLB_IQ_ALL);
        while (cxl_p1_read(adapter, CXL_PSL_TLBIA) & CXL_TLB_SLB_P) {
                if (time_after_eq(jiffies, timeout)) {
                        dev_warn(&adapter->dev, "WARNING: CXL adapter wide TLBIA timed out!\n");
                        return -EBUSY;
                }
                if (!cxl_ops->link_ok(adapter, NULL))
                        return -EIO;
                cpu_relax();
        }

        cxl_p1_write(adapter, CXL_PSL_SLBIA, CXL_TLB_SLB_IQ_ALL);
        while (cxl_p1_read(adapter, CXL_PSL_SLBIA) & CXL_TLB_SLB_P) {
                if (time_after_eq(jiffies, timeout)) {
                        dev_warn(&adapter->dev, "WARNING: CXL adapter wide SLBIA timed out!\n");
                        return -EBUSY;
                }
                if (!cxl_ops->link_ok(adapter, NULL))
                        return -EIO;
                cpu_relax();
        }
        return 0;
}

int cxl_data_cache_flush(struct cxl *adapter)
{
        u64 reg;
        unsigned long timeout = jiffies + (HZ * CXL_TIMEOUT);

        pr_devel("Flushing data cache\n");

        reg = cxl_p1_read(adapter, CXL_PSL_Control);
        reg |= CXL_PSL_Control_Fr;
        cxl_p1_write(adapter, CXL_PSL_Control, reg);

        reg = cxl_p1_read(adapter, CXL_PSL_Control);
        while ((reg & CXL_PSL_Control_Fs_MASK) != CXL_PSL_Control_Fs_Complete) {
                if (time_after_eq(jiffies, timeout)) {
                        dev_warn(&adapter->dev, "WARNING: cache flush timed out!\n");
                        return -EBUSY;
                }

                if (!cxl_ops->link_ok(adapter, NULL)) {
                        dev_warn(&adapter->dev, "WARNING: link down when flushing cache\n");
                        return -EIO;
                }
                cpu_relax();
                reg = cxl_p1_read(adapter, CXL_PSL_Control);
        }

        reg &= ~CXL_PSL_Control_Fr;
        cxl_p1_write(adapter, CXL_PSL_Control, reg);
        return 0;
}

static int cxl_write_sstp(struct cxl_afu *afu, u64 sstp0, u64 sstp1)
{
        int rc;

        /* 1. Disable SSTP by writing 0 to SSTP1[V] */
        cxl_p2n_write(afu, CXL_SSTP1_An, 0);

        /* 2. Invalidate all SLB entries */
        if ((rc = cxl_afu_slbia(afu)))
                return rc;

        /* 3. Set SSTP0_An */
        cxl_p2n_write(afu, CXL_SSTP0_An, sstp0);

        /* 4. Set SSTP1_An */
        cxl_p2n_write(afu, CXL_SSTP1_An, sstp1);

        return 0;
}

/* Using per slice version may improve performance here. (ie. SLBIA_An) */
static void slb_invalid(struct cxl_context *ctx)
{
        struct cxl *adapter = ctx->afu->adapter;
        u64 slbia;

        WARN_ON(!mutex_is_locked(&ctx->afu->native->spa_mutex));

        cxl_p1_write(adapter, CXL_PSL_LBISEL,
                        ((u64)be32_to_cpu(ctx->elem->common.pid) << 32) |
                        be32_to_cpu(ctx->elem->lpid));
        cxl_p1_write(adapter, CXL_PSL_SLBIA, CXL_TLB_SLB_IQ_LPIDPID);

        while (1) {
                if (!cxl_ops->link_ok(adapter, NULL))
                        break;
                slbia = cxl_p1_read(adapter, CXL_PSL_SLBIA);
                if (!(slbia & CXL_TLB_SLB_P))
                        break;
                cpu_relax();
        }
}
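/*
 * Link-list command handshake (as implemented below): software mirrors the
 * command and PE handle into sw_command_status in the SPA, then writes them
 * to CXL_PSL_LLCMD_An. The PSL echoes its progress back through
 * sw_command_status; completion is detected when the CMD, STATE and LINK
 * fields hold the command, its expected completion state (cmd >> 16) and
 * the PE handle, while an all-ones status indicates the PSL rejected the
 * command.
 */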
static int do_process_element_cmd(struct cxl_context *ctx,
                                  u64 cmd, u64 pe_state)
{
        u64 state;
        unsigned long timeout = jiffies + (HZ * CXL_TIMEOUT);
        int rc = 0;

        trace_cxl_llcmd(ctx, cmd);

        WARN_ON(!ctx->afu->enabled);

        ctx->elem->software_state = cpu_to_be32(pe_state);
        smp_wmb();
        *(ctx->afu->native->sw_command_status) = cpu_to_be64(cmd | 0 | ctx->pe);
        smp_mb();
        cxl_p1n_write(ctx->afu, CXL_PSL_LLCMD_An, cmd | ctx->pe);
        while (1) {
                if (time_after_eq(jiffies, timeout)) {
                        dev_warn(&ctx->afu->dev, "WARNING: Process Element Command timed out!\n");
                        rc = -EBUSY;
                        goto out;
                }
                if (!cxl_ops->link_ok(ctx->afu->adapter, ctx->afu)) {
                        dev_warn(&ctx->afu->dev, "WARNING: Device link down, aborting Process Element Command!\n");
                        rc = -EIO;
                        goto out;
                }
                state = be64_to_cpup(ctx->afu->native->sw_command_status);
                if (state == ~0ULL) {
                        pr_err("cxl: Error adding process element to AFU\n");
                        rc = -1;
                        goto out;
                }
                if ((state & (CXL_SPA_SW_CMD_MASK | CXL_SPA_SW_STATE_MASK | CXL_SPA_SW_LINK_MASK)) ==
                    (cmd | (cmd >> 16) | ctx->pe))
                        break;
                /*
                 * The command won't finish in the PSL if there are
                 * outstanding DSIs.  Hence we need to yield here in
                 * case there are outstanding DSIs that we need to
                 * service.  Tuning possibility: we could wait for a
                 * while before rescheduling.
                 */
                schedule();
        }
out:
        trace_cxl_llcmd_done(ctx, cmd, rc);
        return rc;
}

static int add_process_element(struct cxl_context *ctx)
{
        int rc = 0;

        mutex_lock(&ctx->afu->native->spa_mutex);
        pr_devel("%s Adding pe: %i started\n", __func__, ctx->pe);
        if (!(rc = do_process_element_cmd(ctx, CXL_SPA_SW_CMD_ADD, CXL_PE_SOFTWARE_STATE_V)))
                ctx->pe_inserted = true;
        pr_devel("%s Adding pe: %i finished\n", __func__, ctx->pe);
        mutex_unlock(&ctx->afu->native->spa_mutex);
        return rc;
}

static int terminate_process_element(struct cxl_context *ctx)
{
        int rc = 0;

        /* fast path terminate if it's already invalid */
        if (!(ctx->elem->software_state & cpu_to_be32(CXL_PE_SOFTWARE_STATE_V)))
                return rc;

        mutex_lock(&ctx->afu->native->spa_mutex);
        pr_devel("%s Terminate pe: %i started\n", __func__, ctx->pe);
        /* We could be asked to terminate when the hw is down. That
         * should always succeed: it's not running if the hw has gone
         * away and is being reset.
         */
        if (cxl_ops->link_ok(ctx->afu->adapter, ctx->afu))
                rc = do_process_element_cmd(ctx, CXL_SPA_SW_CMD_TERMINATE,
                                            CXL_PE_SOFTWARE_STATE_V | CXL_PE_SOFTWARE_STATE_T);
        ctx->elem->software_state = 0;  /* Remove Valid bit */
        pr_devel("%s Terminate pe: %i finished\n", __func__, ctx->pe);
        mutex_unlock(&ctx->afu->native->spa_mutex);
        return rc;
}

static int remove_process_element(struct cxl_context *ctx)
{
        int rc = 0;

        mutex_lock(&ctx->afu->native->spa_mutex);
        pr_devel("%s Remove pe: %i started\n", __func__, ctx->pe);

        /* We could be asked to remove when the hw is down. Again, if
         * the hw is down, the PE is gone, so we succeed.
         */
        if (cxl_ops->link_ok(ctx->afu->adapter, ctx->afu))
                rc = do_process_element_cmd(ctx, CXL_SPA_SW_CMD_REMOVE, 0);

        if (!rc)
                ctx->pe_inserted = false;
        if (cxl_is_power8())
                slb_invalid(ctx);
        pr_devel("%s Remove pe: %i finished\n", __func__, ctx->pe);
        mutex_unlock(&ctx->afu->native->spa_mutex);

        return rc;
}

void cxl_assign_psn_space(struct cxl_context *ctx)
{
        if (!ctx->afu->pp_size || ctx->master) {
                ctx->psn_phys = ctx->afu->psn_phys;
                ctx->psn_size = ctx->afu->adapter->ps_size;
        } else {
                ctx->psn_phys = ctx->afu->psn_phys +
                        (ctx->afu->native->pp_offset + ctx->afu->pp_size * ctx->pe);
                ctx->psn_size = ctx->afu->pp_size;
        }
}
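
/*
 * In other words: a master context (or an AFU with no per-process area)
 * maps the entire problem state space, while an ordinary context with
 * handle pe gets the slice at pp_offset + pe * pp_size. E.g. with a
 * (hypothetical) pp_size of 64K, PE 3 would map the 64K window starting
 * 3 * 64K past pp_offset.
 */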

static int activate_afu_directed(struct cxl_afu *afu)
{
        int rc;

        dev_info(&afu->dev, "Activating AFU directed mode\n");

        afu->num_procs = afu->max_procs_virtualised;
        if (afu->native->spa == NULL) {
                if (cxl_alloc_spa(afu, CXL_MODE_DIRECTED))
                        return -ENOMEM;
        }
        attach_spa(afu);

        cxl_p1n_write(afu, CXL_PSL_SCNTL_An, CXL_PSL_SCNTL_An_PM_AFU);
        if (cxl_is_power8())
                cxl_p1n_write(afu, CXL_PSL_AMOR_An, 0xFFFFFFFFFFFFFFFFULL);
        cxl_p1n_write(afu, CXL_PSL_ID_An, CXL_PSL_ID_An_F | CXL_PSL_ID_An_L);

        afu->current_mode = CXL_MODE_DIRECTED;

        if ((rc = cxl_chardev_m_afu_add(afu)))
                return rc;

        if ((rc = cxl_sysfs_afu_m_add(afu)))
                goto err;

        if ((rc = cxl_chardev_s_afu_add(afu)))
                goto err1;

        return 0;
err1:
        cxl_sysfs_afu_m_remove(afu);
err:
        cxl_chardev_afu_remove(afu);
        return rc;
}

#ifdef CONFIG_CPU_LITTLE_ENDIAN
#define set_endian(sr) ((sr) |= CXL_PSL_SR_An_LE)
#else
#define set_endian(sr) ((sr) &= ~(CXL_PSL_SR_An_LE))
#endif

u64 cxl_calculate_sr(bool master, bool kernel, bool real_mode, bool p9)
{
        u64 sr = 0;

        set_endian(sr);
        if (master)
                sr |= CXL_PSL_SR_An_MP;
        if (mfspr(SPRN_LPCR) & LPCR_TC)
                sr |= CXL_PSL_SR_An_TC;
        if (kernel) {
                if (!real_mode)
                        sr |= CXL_PSL_SR_An_R;
                sr |= (mfmsr() & MSR_SF) | CXL_PSL_SR_An_HV;
        } else {
                sr |= CXL_PSL_SR_An_PR | CXL_PSL_SR_An_R;
                if (radix_enabled())
                        sr |= CXL_PSL_SR_An_HV;
                else
                        sr &= ~(CXL_PSL_SR_An_HV);
                if (!test_tsk_thread_flag(current, TIF_32BIT))
                        sr |= CXL_PSL_SR_An_SF;
        }
        if (p9) {
                if (radix_enabled())
                        sr |= CXL_PSL_SR_An_XLAT_ror;
                else
                        sr |= CXL_PSL_SR_An_XLAT_hpt;
        }
        return sr;
}
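
/*
 * Roughly speaking, the state register word built above mirrors how the
 * CPU would run the owning context: endianness (LE), master access (MP),
 * problem state (PR), relocation (R), hypervisor state (HV), 64-bit mode
 * (SF) and, on POWER9, whether the XSL should translate via the radix
 * tree or the hashed page table.
 */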

static u64 calculate_sr(struct cxl_context *ctx)
{
        return cxl_calculate_sr(ctx->master, ctx->kernel, ctx->real_mode,
                                cxl_is_power9());
}

static void update_ivtes_directed(struct cxl_context *ctx)
{
        bool need_update = (ctx->status == STARTED);
        int r;

        if (need_update) {
                WARN_ON(terminate_process_element(ctx));
                WARN_ON(remove_process_element(ctx));
        }

        for (r = 0; r < CXL_IRQ_RANGES; r++) {
                ctx->elem->ivte_offsets[r] = cpu_to_be16(ctx->irqs.offset[r]);
                ctx->elem->ivte_ranges[r] = cpu_to_be16(ctx->irqs.range[r]);
        }

        /*
         * Theoretically we could use the update llcmd, instead of a
         * terminate/remove/add (or if an atomic update was required we could
         * do a suspend/update/resume), however it seems there might be issues
         * with the update llcmd on some cards (including those using an XSL on
         * an ASIC) so for now it's safest to go with the commands that are
         * known to work. In the future if we come across a situation where the
         * card may be performing transactions using the same PE while we are
         * doing this update we might need to revisit this.
         */
        if (need_update)
                WARN_ON(add_process_element(ctx));
}

static int process_element_entry_psl9(struct cxl_context *ctx, u64 wed, u64 amr)
{
        u32 pid;

        cxl_assign_psn_space(ctx);

        ctx->elem->ctxtime = 0; /* disable */
        ctx->elem->lpid = cpu_to_be32(mfspr(SPRN_LPID));
        ctx->elem->haurp = 0; /* disable */

        if (ctx->kernel)
                pid = 0;
        else {
                if (ctx->mm == NULL) {
                        pr_devel("%s: unable to get mm for pe=%d pid=%i\n",
                                __func__, ctx->pe, pid_nr(ctx->pid));
                        return -EINVAL;
                }
                pid = ctx->mm->context.id;
        }

        ctx->elem->common.tid = 0;
        ctx->elem->common.pid = cpu_to_be32(pid);

        ctx->elem->sr = cpu_to_be64(calculate_sr(ctx));

        ctx->elem->common.csrp = 0; /* disable */

        cxl_prefault(ctx, wed);

        /*
         * Ensure we have the multiplexed PSL interrupt set up to take faults
         * for kernel contexts that may not have allocated any AFU IRQs at all:
         */
        if (ctx->irqs.range[0] == 0) {
                ctx->irqs.offset[0] = ctx->afu->native->psl_hwirq;
                ctx->irqs.range[0] = 1;
        }

        ctx->elem->common.amr = cpu_to_be64(amr);
        ctx->elem->common.wed = cpu_to_be64(wed);

        return 0;
}

int cxl_attach_afu_directed_psl9(struct cxl_context *ctx, u64 wed, u64 amr)
{
        int result;

        /* fill the process element entry */
        result = process_element_entry_psl9(ctx, wed, amr);
        if (result)
                return result;

        update_ivtes_directed(ctx);

        /* first guy needs to enable */
        result = cxl_ops->afu_check_and_enable(ctx->afu);
        if (result)
                return result;

        return add_process_element(ctx);
}

int cxl_attach_afu_directed_psl8(struct cxl_context *ctx, u64 wed, u64 amr)
{
        u32 pid;
        int result;

        cxl_assign_psn_space(ctx);

        ctx->elem->ctxtime = 0; /* disable */
        ctx->elem->lpid = cpu_to_be32(mfspr(SPRN_LPID));
        ctx->elem->haurp = 0; /* disable */
        ctx->elem->u.sdr = cpu_to_be64(mfspr(SPRN_SDR1));

        pid = current->pid;
        if (ctx->kernel)
                pid = 0;
        ctx->elem->common.tid = 0;
        ctx->elem->common.pid = cpu_to_be32(pid);

        ctx->elem->sr = cpu_to_be64(calculate_sr(ctx));

        ctx->elem->common.csrp = 0; /* disable */
        ctx->elem->common.u.psl8.aurp0 = 0; /* disable */
        ctx->elem->common.u.psl8.aurp1 = 0; /* disable */

        cxl_prefault(ctx, wed);

        ctx->elem->common.u.psl8.sstp0 = cpu_to_be64(ctx->sstp0);
        ctx->elem->common.u.psl8.sstp1 = cpu_to_be64(ctx->sstp1);

        /*
         * Ensure we have the multiplexed PSL interrupt set up to take faults
         * for kernel contexts that may not have allocated any AFU IRQs at all:
         */
        if (ctx->irqs.range[0] == 0) {
                ctx->irqs.offset[0] = ctx->afu->native->psl_hwirq;
                ctx->irqs.range[0] = 1;
        }

        update_ivtes_directed(ctx);

        ctx->elem->common.amr = cpu_to_be64(amr);
        ctx->elem->common.wed = cpu_to_be64(wed);

        /* first guy needs to enable */
        if ((result = cxl_ops->afu_check_and_enable(ctx->afu)))
                return result;

        return add_process_element(ctx);
}

static int deactivate_afu_directed(struct cxl_afu *afu)
{
        dev_info(&afu->dev, "Deactivating AFU directed mode\n");

        afu->current_mode = 0;
        afu->num_procs = 0;

        cxl_sysfs_afu_m_remove(afu);
        cxl_chardev_afu_remove(afu);

        /*
         * The CAIA section 2.2.1 indicates that the procedure for starting and
         * stopping an AFU in AFU directed mode is AFU specific, which is not
         * ideal since this code is generic and with one exception has no
         * knowledge of the AFU. This is in contrast to the procedure for
         * disabling a dedicated process AFU, which is documented to just
         * require a reset. The architecture does indicate that both an AFU
         * reset and an AFU disable should result in the AFU being disabled and
         * we do both followed by a PSL purge for safety.
         *
         * Notably we used to have some issues with the disable sequence on PSL
         * cards, which is why we ended up using this heavy weight procedure in
         * the first place, however a bug was discovered that had rendered the
         * disable operation ineffective, so it is conceivable that was the
         * sole explanation for those difficulties. Careful regression testing
         * is recommended if anyone attempts to remove or reorder these
         * operations.
         *
         * The XSL on the Mellanox CX4 behaves a little differently from the
         * PSL based cards and will time out an AFU reset if the AFU is still
         * enabled. That card is special in that we do have a means to identify
         * it from this code, so in that case we skip the reset and just use a
         * disable/purge to avoid the timeout and corresponding noise in the
         * kernel log.
         */
        if (afu->adapter->native->sl_ops->needs_reset_before_disable)
                cxl_ops->afu_reset(afu);
        cxl_afu_disable(afu);
        cxl_psl_purge(afu);

        return 0;
}

int cxl_activate_dedicated_process_psl9(struct cxl_afu *afu)
{
        dev_info(&afu->dev, "Activating dedicated process mode\n");

        /*
         * If XSL is set to dedicated mode (Set in PSL_SCNTL reg), the
         * XSL and AFU are programmed to work with a single context.
         * The context information should be configured in the SPA area
         * index 0 (so PSL_SPAP must be configured before enabling the
         * AFU).
         */
        afu->num_procs = 1;
        if (afu->native->spa == NULL) {
                if (cxl_alloc_spa(afu, CXL_MODE_DEDICATED))
                        return -ENOMEM;
        }
        attach_spa(afu);

        cxl_p1n_write(afu, CXL_PSL_SCNTL_An, CXL_PSL_SCNTL_An_PM_Process);
        cxl_p1n_write(afu, CXL_PSL_ID_An, CXL_PSL_ID_An_F | CXL_PSL_ID_An_L);

        afu->current_mode = CXL_MODE_DEDICATED;

        return cxl_chardev_d_afu_add(afu);
}

int cxl_activate_dedicated_process_psl8(struct cxl_afu *afu)
{
        dev_info(&afu->dev, "Activating dedicated process mode\n");

        cxl_p1n_write(afu, CXL_PSL_SCNTL_An, CXL_PSL_SCNTL_An_PM_Process);

        cxl_p1n_write(afu, CXL_PSL_CtxTime_An, 0); /* disable */
        cxl_p1n_write(afu, CXL_PSL_SPAP_An, 0);    /* disable */
        cxl_p1n_write(afu, CXL_PSL_AMOR_An, 0xFFFFFFFFFFFFFFFFULL);
        cxl_p1n_write(afu, CXL_PSL_LPID_An, mfspr(SPRN_LPID));
        cxl_p1n_write(afu, CXL_HAURP_An, 0);       /* disable */
        cxl_p1n_write(afu, CXL_PSL_SDR_An, mfspr(SPRN_SDR1));

        cxl_p2n_write(afu, CXL_CSRP_An, 0);        /* disable */
        cxl_p2n_write(afu, CXL_AURP0_An, 0);       /* disable */
        cxl_p2n_write(afu, CXL_AURP1_An, 0);       /* disable */

        afu->current_mode = CXL_MODE_DEDICATED;
        afu->num_procs = 1;

        return cxl_chardev_d_afu_add(afu);
}

void cxl_update_dedicated_ivtes_psl9(struct cxl_context *ctx)
{
        int r;

        for (r = 0; r < CXL_IRQ_RANGES; r++) {
                ctx->elem->ivte_offsets[r] = cpu_to_be16(ctx->irqs.offset[r]);
                ctx->elem->ivte_ranges[r] = cpu_to_be16(ctx->irqs.range[r]);
        }
}

void cxl_update_dedicated_ivtes_psl8(struct cxl_context *ctx)
{
        struct cxl_afu *afu = ctx->afu;

        cxl_p1n_write(afu, CXL_PSL_IVTE_Offset_An,
                       (((u64)ctx->irqs.offset[0] & 0xffff) << 48) |
                       (((u64)ctx->irqs.offset[1] & 0xffff) << 32) |
                       (((u64)ctx->irqs.offset[2] & 0xffff) << 16) |
                        ((u64)ctx->irqs.offset[3] & 0xffff));
        cxl_p1n_write(afu, CXL_PSL_IVTE_Limit_An, (u64)
                       (((u64)ctx->irqs.range[0] & 0xffff) << 48) |
                       (((u64)ctx->irqs.range[1] & 0xffff) << 32) |
                       (((u64)ctx->irqs.range[2] & 0xffff) << 16) |
                        ((u64)ctx->irqs.range[3] & 0xffff));
}
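
/*
 * Packing example (hypothetical values): with IVTE offsets {0x10, 0x20,
 * 0x30, 0x40} for ranges 0-3, the PSL8 path above writes the single
 * 64-bit value 0x0010002000300040 to CXL_PSL_IVTE_Offset_An, i.e. one
 * 16-bit field per IRQ range, with range 0 in the most significant
 * halfword.
 */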

int cxl_attach_dedicated_process_psl9(struct cxl_context *ctx, u64 wed, u64 amr)
{
        struct cxl_afu *afu = ctx->afu;
        int result;

        /* fill the process element entry */
        result = process_element_entry_psl9(ctx, wed, amr);
        if (result)
                return result;

        if (ctx->afu->adapter->native->sl_ops->update_dedicated_ivtes)
                afu->adapter->native->sl_ops->update_dedicated_ivtes(ctx);

        result = cxl_ops->afu_reset(afu);
        if (result)
                return result;

        return afu_enable(afu);
}

int cxl_attach_dedicated_process_psl8(struct cxl_context *ctx, u64 wed, u64 amr)
{
        struct cxl_afu *afu = ctx->afu;
        u64 pid;
        int rc;

        pid = (u64)current->pid << 32;
        if (ctx->kernel)
                pid = 0;
        cxl_p2n_write(afu, CXL_PSL_PID_TID_An, pid);

        cxl_p1n_write(afu, CXL_PSL_SR_An, calculate_sr(ctx));

        if ((rc = cxl_write_sstp(afu, ctx->sstp0, ctx->sstp1)))
                return rc;

        cxl_prefault(ctx, wed);

        if (ctx->afu->adapter->native->sl_ops->update_dedicated_ivtes)
                afu->adapter->native->sl_ops->update_dedicated_ivtes(ctx);

        cxl_p2n_write(afu, CXL_PSL_AMR_An, amr);

        /* master only context for dedicated */
        cxl_assign_psn_space(ctx);

        if ((rc = cxl_ops->afu_reset(afu)))
                return rc;

        cxl_p2n_write(afu, CXL_PSL_WED_An, wed);

        return afu_enable(afu);
}

static int deactivate_dedicated_process(struct cxl_afu *afu)
{
        dev_info(&afu->dev, "Deactivating dedicated process mode\n");

        afu->current_mode = 0;
        afu->num_procs = 0;

        cxl_chardev_afu_remove(afu);

        return 0;
}

static int native_afu_deactivate_mode(struct cxl_afu *afu, int mode)
{
        if (mode == CXL_MODE_DIRECTED)
                return deactivate_afu_directed(afu);
        if (mode == CXL_MODE_DEDICATED)
                return deactivate_dedicated_process(afu);
        return 0;
}

static int native_afu_activate_mode(struct cxl_afu *afu, int mode)
{
        if (!mode)
                return 0;
        if (!(mode & afu->modes_supported))
                return -EINVAL;

        if (!cxl_ops->link_ok(afu->adapter, afu)) {
                WARN(1, "Device link is down, refusing to activate!\n");
                return -EIO;
        }

        if (mode == CXL_MODE_DIRECTED)
                return activate_afu_directed(afu);
        if ((mode == CXL_MODE_DEDICATED) &&
            (afu->adapter->native->sl_ops->activate_dedicated_process))
                return afu->adapter->native->sl_ops->activate_dedicated_process(afu);

        return -EINVAL;
}

static int native_attach_process(struct cxl_context *ctx, bool kernel,
                                u64 wed, u64 amr)
{
        if (!cxl_ops->link_ok(ctx->afu->adapter, ctx->afu)) {
                WARN(1, "Device link is down, refusing to attach process!\n");
                return -EIO;
        }

        ctx->kernel = kernel;
        if ((ctx->afu->current_mode == CXL_MODE_DIRECTED) &&
            (ctx->afu->adapter->native->sl_ops->attach_afu_directed))
                return ctx->afu->adapter->native->sl_ops->attach_afu_directed(ctx, wed, amr);

        if ((ctx->afu->current_mode == CXL_MODE_DEDICATED) &&
            (ctx->afu->adapter->native->sl_ops->attach_dedicated_process))
                return ctx->afu->adapter->native->sl_ops->attach_dedicated_process(ctx, wed, amr);

        return -EINVAL;
}

static inline int detach_process_native_dedicated(struct cxl_context *ctx)
{
        /*
         * The CAIA section 2.1.1 indicates that we need to do an AFU reset to
         * stop the AFU in dedicated mode (we therefore do not make that
         * optional like we do in the afu directed path). It does not indicate
         * that we need to do an explicit disable (which should occur
         * implicitly as part of the reset) or purge, but we do these as well
         * to be on the safe side.
         *
         * Notably we used to have some issues with the disable sequence
         * (before the sequence was spelled out in the architecture) which is
         * why we were so heavy weight in the first place, however a bug was
         * discovered that had rendered the disable operation ineffective, so
         * it is conceivable that was the sole explanation for those
         * difficulties. Point is, we should be careful and do some regression
         * testing if we ever attempt to remove any part of this procedure.
         */
        cxl_ops->afu_reset(ctx->afu);
        cxl_afu_disable(ctx->afu);
        cxl_psl_purge(ctx->afu);
        return 0;
}

static void native_update_ivtes(struct cxl_context *ctx)
{
        if (ctx->afu->current_mode == CXL_MODE_DIRECTED)
                return update_ivtes_directed(ctx);
        if ((ctx->afu->current_mode == CXL_MODE_DEDICATED) &&
            (ctx->afu->adapter->native->sl_ops->update_dedicated_ivtes))
                return ctx->afu->adapter->native->sl_ops->update_dedicated_ivtes(ctx);
        WARN(1, "native_update_ivtes: Bad mode\n");
}

static inline int detach_process_native_afu_directed(struct cxl_context *ctx)
{
        if (!ctx->pe_inserted)
                return 0;
        if (terminate_process_element(ctx))
                return -1;
        if (remove_process_element(ctx))
                return -1;

        return 0;
}

static int native_detach_process(struct cxl_context *ctx)
{
        trace_cxl_detach(ctx);

        if (ctx->afu->current_mode == CXL_MODE_DEDICATED)
                return detach_process_native_dedicated(ctx);

        return detach_process_native_afu_directed(ctx);
}

static int native_get_irq_info(struct cxl_afu *afu, struct cxl_irq_info *info)
{
        /* If the adapter has gone away, we can't get any meaningful
         * information.
         */
        if (!cxl_ops->link_ok(afu->adapter, afu))
                return -EIO;

        info->dsisr = cxl_p2n_read(afu, CXL_PSL_DSISR_An);
        info->dar = cxl_p2n_read(afu, CXL_PSL_DAR_An);
        if (cxl_is_power8())
                info->dsr = cxl_p2n_read(afu, CXL_PSL_DSR_An);
        info->afu_err = cxl_p2n_read(afu, CXL_AFU_ERR_An);
        info->errstat = cxl_p2n_read(afu, CXL_PSL_ErrStat_An);
        info->proc_handle = 0;

        return 0;
}

void cxl_native_irq_dump_regs_psl9(struct cxl_context *ctx)
{
        u64 fir1, fir2, serr;

        fir1 = cxl_p1_read(ctx->afu->adapter, CXL_PSL9_FIR1);
        fir2 = cxl_p1_read(ctx->afu->adapter, CXL_PSL9_FIR2);

        dev_crit(&ctx->afu->dev, "PSL_FIR1: 0x%016llx\n", fir1);
        dev_crit(&ctx->afu->dev, "PSL_FIR2: 0x%016llx\n", fir2);
        if (ctx->afu->adapter->native->sl_ops->register_serr_irq) {
                serr = cxl_p1n_read(ctx->afu, CXL_PSL_SERR_An);
                cxl_afu_decode_psl_serr(ctx->afu, serr);
        }
}

void cxl_native_irq_dump_regs_psl8(struct cxl_context *ctx)
{
        u64 fir1, fir2, fir_slice, serr, afu_debug;

        fir1 = cxl_p1_read(ctx->afu->adapter, CXL_PSL_FIR1);
        fir2 = cxl_p1_read(ctx->afu->adapter, CXL_PSL_FIR2);
        fir_slice = cxl_p1n_read(ctx->afu, CXL_PSL_FIR_SLICE_An);
        afu_debug = cxl_p1n_read(ctx->afu, CXL_AFU_DEBUG_An);

        dev_crit(&ctx->afu->dev, "PSL_FIR1: 0x%016llx\n", fir1);
        dev_crit(&ctx->afu->dev, "PSL_FIR2: 0x%016llx\n", fir2);
        if (ctx->afu->adapter->native->sl_ops->register_serr_irq) {
                serr = cxl_p1n_read(ctx->afu, CXL_PSL_SERR_An);
                cxl_afu_decode_psl_serr(ctx->afu, serr);
        }
        dev_crit(&ctx->afu->dev, "PSL_FIR_SLICE_An: 0x%016llx\n", fir_slice);
        dev_crit(&ctx->afu->dev, "CXL_PSL_AFU_DEBUG_An: 0x%016llx\n", afu_debug);
}

static irqreturn_t native_handle_psl_slice_error(struct cxl_context *ctx,
                                                u64 dsisr, u64 errstat)
{
        dev_crit(&ctx->afu->dev, "PSL ERROR STATUS: 0x%016llx\n", errstat);

        if (ctx->afu->adapter->native->sl_ops->psl_irq_dump_registers)
                ctx->afu->adapter->native->sl_ops->psl_irq_dump_registers(ctx);

        if (ctx->afu->adapter->native->sl_ops->debugfs_stop_trace) {
                dev_crit(&ctx->afu->dev, "STOPPING CXL TRACE\n");
                ctx->afu->adapter->native->sl_ops->debugfs_stop_trace(ctx->afu->adapter);
        }

        return cxl_ops->ack_irq(ctx, 0, errstat);
}

static bool cxl_is_translation_fault(struct cxl_afu *afu, u64 dsisr)
{
        if ((cxl_is_power8()) && (dsisr & CXL_PSL_DSISR_TRANS))
                return true;

        if ((cxl_is_power9()) && (dsisr & CXL_PSL9_DSISR_An_TF))
                return true;

        return false;
}

irqreturn_t cxl_fail_irq_psl(struct cxl_afu *afu, struct cxl_irq_info *irq_info)
{
        if (cxl_is_translation_fault(afu, irq_info->dsisr))
                cxl_p2n_write(afu, CXL_PSL_TFC_An, CXL_PSL_TFC_An_AE);
        else
                cxl_p2n_write(afu, CXL_PSL_TFC_An, CXL_PSL_TFC_An_A);

        return IRQ_HANDLED;
}

static irqreturn_t native_irq_multiplexed(int irq, void *data)
{
        struct cxl_afu *afu = data;
        struct cxl_context *ctx;
        struct cxl_irq_info irq_info;
        u64 phreg = cxl_p2n_read(afu, CXL_PSL_PEHandle_An);
        int ph, ret = IRQ_HANDLED, res;

        /* check if eeh kicked in while the interrupt was in flight */
        if (unlikely(phreg == ~0ULL)) {
                dev_warn(&afu->dev,
                         "Ignoring slice interrupt(%d) due to fenced card",
                         irq);
                return IRQ_HANDLED;
        }
        /* Mask the pe-handle from register value */
        ph = phreg & 0xffff;
        if ((res = native_get_irq_info(afu, &irq_info))) {
                WARN(1, "Unable to get CXL IRQ Info: %i\n", res);
                if (afu->adapter->native->sl_ops->fail_irq)
                        return afu->adapter->native->sl_ops->fail_irq(afu, &irq_info);
                return ret;
        }

        rcu_read_lock();
        ctx = idr_find(&afu->contexts_idr, ph);
        if (ctx) {
                if (afu->adapter->native->sl_ops->handle_interrupt)
                        ret = afu->adapter->native->sl_ops->handle_interrupt(irq, ctx, &irq_info);
                rcu_read_unlock();
                return ret;
        }
        rcu_read_unlock();

        WARN(1, "Unable to demultiplex CXL PSL IRQ for PE %i DSISR %016llx DAR"
                " %016llx\n(Possible AFU HW issue - was a term/remove acked"
                " with outstanding transactions?)\n", ph, irq_info.dsisr,
                irq_info.dar);
        if (afu->adapter->native->sl_ops->fail_irq)
                ret = afu->adapter->native->sl_ops->fail_irq(afu, &irq_info);
        return ret;
}

static void native_irq_wait(struct cxl_context *ctx)
{
        u64 dsisr;
        int timeout = 1000;
        int ph;

        /*
         * Wait until no further interrupts are presented by the PSL
         * for this context.
         */
        while (timeout--) {
                ph = cxl_p2n_read(ctx->afu, CXL_PSL_PEHandle_An) & 0xffff;
                if (ph != ctx->pe)
                        return;
                dsisr = cxl_p2n_read(ctx->afu, CXL_PSL_DSISR_An);
                if (cxl_is_power8() &&
                   ((dsisr & CXL_PSL_DSISR_PENDING) == 0))
                        return;
                if (cxl_is_power9() &&
                   ((dsisr & CXL_PSL9_DSISR_PENDING) == 0))
                        return;
                /*
                 * We are waiting for the workqueue to process our
                 * irq, so need to let that run here.
                 */
                msleep(1);
        }

        dev_warn(&ctx->afu->dev, "WARNING: waiting on DSI for PE %i"
                 " DSISR %016llx!\n", ph, dsisr);
        return;
}

static irqreturn_t native_slice_irq_err(int irq, void *data)
{
        struct cxl_afu *afu = data;
        u64 errstat, serr, afu_error, dsisr;
        u64 fir_slice, afu_debug, irq_mask;

        /*
         * slice err interrupt is only used with full PSL (no XSL)
         */
        serr = cxl_p1n_read(afu, CXL_PSL_SERR_An);
        errstat = cxl_p2n_read(afu, CXL_PSL_ErrStat_An);
        afu_error = cxl_p2n_read(afu, CXL_AFU_ERR_An);
        dsisr = cxl_p2n_read(afu, CXL_PSL_DSISR_An);
        cxl_afu_decode_psl_serr(afu, serr);

        if (cxl_is_power8()) {
                fir_slice = cxl_p1n_read(afu, CXL_PSL_FIR_SLICE_An);
                afu_debug = cxl_p1n_read(afu, CXL_AFU_DEBUG_An);
                dev_crit(&afu->dev, "PSL_FIR_SLICE_An: 0x%016llx\n", fir_slice);
                dev_crit(&afu->dev, "CXL_PSL_AFU_DEBUG_An: 0x%016llx\n", afu_debug);
        }
        dev_crit(&afu->dev, "CXL_PSL_ErrStat_An: 0x%016llx\n", errstat);
        dev_crit(&afu->dev, "AFU_ERR_An: 0x%.16llx\n", afu_error);
        dev_crit(&afu->dev, "PSL_DSISR_An: 0x%.16llx\n", dsisr);

        /* mask off the IRQ so it won't retrigger until the AFU is reset */
        irq_mask = (serr & CXL_PSL_SERR_An_IRQS) >> 32;
        serr |= irq_mask;
        cxl_p1n_write(afu, CXL_PSL_SERR_An, serr);
        dev_info(&afu->dev, "Further such interrupts will be masked until the AFU is reset\n");

        return IRQ_HANDLED;
}

void cxl_native_err_irq_dump_regs(struct cxl *adapter)
{
        u64 fir1, fir2;

        fir1 = cxl_p1_read(adapter, CXL_PSL_FIR1);
        fir2 = cxl_p1_read(adapter, CXL_PSL_FIR2);

        dev_crit(&adapter->dev, "PSL_FIR1: 0x%016llx\nPSL_FIR2: 0x%016llx\n", fir1, fir2);
}

static irqreturn_t native_irq_err(int irq, void *data)
{
        struct cxl *adapter = data;
        u64 err_ivte;

        WARN(1, "CXL ERROR interrupt %i\n", irq);

        err_ivte = cxl_p1_read(adapter, CXL_PSL_ErrIVTE);
        dev_crit(&adapter->dev, "PSL_ErrIVTE: 0x%016llx\n", err_ivte);

        if (adapter->native->sl_ops->debugfs_stop_trace) {
                dev_crit(&adapter->dev, "STOPPING CXL TRACE\n");
                adapter->native->sl_ops->debugfs_stop_trace(adapter);
        }

        if (adapter->native->sl_ops->err_irq_dump_registers)
                adapter->native->sl_ops->err_irq_dump_registers(adapter);

        return IRQ_HANDLED;
}

int cxl_native_register_psl_err_irq(struct cxl *adapter)
{
        int rc;

        adapter->irq_name = kasprintf(GFP_KERNEL, "cxl-%s-err",
                                      dev_name(&adapter->dev));
        if (!adapter->irq_name)
                return -ENOMEM;

        if ((rc = cxl_register_one_irq(adapter, native_irq_err, adapter,
                                       &adapter->native->err_hwirq,
                                       &adapter->native->err_virq,
                                       adapter->irq_name))) {
                kfree(adapter->irq_name);
                adapter->irq_name = NULL;
                return rc;
        }

        cxl_p1_write(adapter, CXL_PSL_ErrIVTE, adapter->native->err_hwirq & 0xffff);

        return 0;
}

void cxl_native_release_psl_err_irq(struct cxl *adapter)
{
        if (adapter->native->err_virq == 0 ||
            adapter->native->err_virq !=
            irq_find_mapping(NULL, adapter->native->err_hwirq))
                return;

        cxl_p1_write(adapter, CXL_PSL_ErrIVTE, 0x0000000000000000);
        cxl_unmap_irq(adapter->native->err_virq, adapter);
        cxl_ops->release_one_irq(adapter, adapter->native->err_hwirq);
        kfree(adapter->irq_name);
        adapter->native->err_virq = 0;
}

int cxl_native_register_serr_irq(struct cxl_afu *afu)
{
        u64 serr;
        int rc;

        afu->err_irq_name = kasprintf(GFP_KERNEL, "cxl-%s-err",
                                      dev_name(&afu->dev));
        if (!afu->err_irq_name)
                return -ENOMEM;

        if ((rc = cxl_register_one_irq(afu->adapter, native_slice_irq_err, afu,
                                       &afu->serr_hwirq,
                                       &afu->serr_virq, afu->err_irq_name))) {
                kfree(afu->err_irq_name);
                afu->err_irq_name = NULL;
                return rc;
        }

        serr = cxl_p1n_read(afu, CXL_PSL_SERR_An);
        if (cxl_is_power8())
                serr = (serr & 0x00ffffffffff0000ULL) | (afu->serr_hwirq & 0xffff);
        if (cxl_is_power9()) {
                /*
                 * By default, all errors are masked. So don't set all
                 * the mask bits; slice errors will still be transferred.
                 */
                serr = (serr & ~0xff0000007fffffffULL) | (afu->serr_hwirq & 0xffff);
        }
        cxl_p1n_write(afu, CXL_PSL_SERR_An, serr);

        return 0;
}

void cxl_native_release_serr_irq(struct cxl_afu *afu)
{
        if (afu->serr_virq == 0 ||
            afu->serr_virq != irq_find_mapping(NULL, afu->serr_hwirq))
                return;

        cxl_p1n_write(afu, CXL_PSL_SERR_An, 0x0000000000000000);
        cxl_unmap_irq(afu->serr_virq, afu);
        cxl_ops->release_one_irq(afu->adapter, afu->serr_hwirq);
        kfree(afu->err_irq_name);
        afu->serr_virq = 0;
}

int cxl_native_register_psl_irq(struct cxl_afu *afu)
{
        int rc;

        afu->psl_irq_name = kasprintf(GFP_KERNEL, "cxl-%s",
                                      dev_name(&afu->dev));
        if (!afu->psl_irq_name)
                return -ENOMEM;

        if ((rc = cxl_register_one_irq(afu->adapter, native_irq_multiplexed,
                                    afu, &afu->native->psl_hwirq, &afu->native->psl_virq,
                                    afu->psl_irq_name))) {
                kfree(afu->psl_irq_name);
                afu->psl_irq_name = NULL;
        }
        return rc;
}

void cxl_native_release_psl_irq(struct cxl_afu *afu)
{
        if (afu->native->psl_virq == 0 ||
            afu->native->psl_virq !=
            irq_find_mapping(NULL, afu->native->psl_hwirq))
                return;

        cxl_unmap_irq(afu->native->psl_virq, afu);
        cxl_ops->release_one_irq(afu->adapter, afu->native->psl_hwirq);
        kfree(afu->psl_irq_name);
        afu->native->psl_virq = 0;
}

static void recover_psl_err(struct cxl_afu *afu, u64 errstat)
{
        u64 dsisr;

        pr_devel("RECOVERING FROM PSL ERROR... (0x%016llx)\n", errstat);

        /* Clear PSL_DSISR[PE] */
        dsisr = cxl_p2n_read(afu, CXL_PSL_DSISR_An);
        cxl_p2n_write(afu, CXL_PSL_DSISR_An, dsisr & ~CXL_PSL_DSISR_An_PE);

        /* Write 1s to clear error status bits */
        cxl_p2n_write(afu, CXL_PSL_ErrStat_An, errstat);
}

static int native_ack_irq(struct cxl_context *ctx, u64 tfc, u64 psl_reset_mask)
{
        trace_cxl_psl_irq_ack(ctx, tfc);
        if (tfc)
                cxl_p2n_write(ctx->afu, CXL_PSL_TFC_An, tfc);
        if (psl_reset_mask)
                recover_psl_err(ctx->afu, psl_reset_mask);

        return 0;
}

int cxl_check_error(struct cxl_afu *afu)
{
        return (cxl_p1n_read(afu, CXL_PSL_SCNTL_An) == ~0ULL);
}

static bool native_support_attributes(const char *attr_name,
                                      enum cxl_attrs type)
{
        return true;
}

static int native_afu_cr_read64(struct cxl_afu *afu, int cr, u64 off, u64 *out)
{
        if (unlikely(!cxl_ops->link_ok(afu->adapter, afu)))
                return -EIO;
        if (unlikely(off >= afu->crs_len))
                return -ERANGE;
        *out = in_le64(afu->native->afu_desc_mmio + afu->crs_offset +
                (cr * afu->crs_len) + off);
        return 0;
}

static int native_afu_cr_read32(struct cxl_afu *afu, int cr, u64 off, u32 *out)
{
        if (unlikely(!cxl_ops->link_ok(afu->adapter, afu)))
                return -EIO;
        if (unlikely(off >= afu->crs_len))
                return -ERANGE;
        *out = in_le32(afu->native->afu_desc_mmio + afu->crs_offset +
                (cr * afu->crs_len) + off);
        return 0;
}

static int native_afu_cr_read16(struct cxl_afu *afu, int cr, u64 off, u16 *out)
{
        u64 aligned_off = off & ~0x3L;
        u32 val;
        int rc;

        rc = native_afu_cr_read32(afu, cr, aligned_off, &val);
        if (!rc)
                *out = (val >> ((off & 0x3) * 8)) & 0xffff;
        return rc;
}
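
/*
 * Worked example: a 16-bit read at off = 6 rounds down to aligned_off = 4,
 * performs one little-endian 32-bit read there, and extracts bits 31:16
 * (shift = (6 & 3) * 8 = 16) as the result. The 8-bit variant below works
 * the same way on a single byte lane.
 */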

static int native_afu_cr_read8(struct cxl_afu *afu, int cr, u64 off, u8 *out)
{
        u64 aligned_off = off & ~0x3L;
        u32 val;
        int rc;

        rc = native_afu_cr_read32(afu, cr, aligned_off, &val);
        if (!rc)
                *out = (val >> ((off & 0x3) * 8)) & 0xff;
        return rc;
}

static int native_afu_cr_write32(struct cxl_afu *afu, int cr, u64 off, u32 in)
{
        if (unlikely(!cxl_ops->link_ok(afu->adapter, afu)))
                return -EIO;
        if (unlikely(off >= afu->crs_len))
                return -ERANGE;
        out_le32(afu->native->afu_desc_mmio + afu->crs_offset +
                (cr * afu->crs_len) + off, in);
        return 0;
}

static int native_afu_cr_write16(struct cxl_afu *afu, int cr, u64 off, u16 in)
{
        u64 aligned_off = off & ~0x3L;
        u32 val32, mask, shift;
        int rc;

        rc = native_afu_cr_read32(afu, cr, aligned_off, &val32);
        if (rc)
                return rc;
        shift = (off & 0x3) * 8;
        WARN_ON(shift == 24);
        mask = 0xffff << shift;
        val32 = (val32 & ~mask) | (in << shift);

        rc = native_afu_cr_write32(afu, cr, aligned_off, val32);
        return rc;
}

static int native_afu_cr_write8(struct cxl_afu *afu, int cr, u64 off, u8 in)
{
        u64 aligned_off = off & ~0x3L;
        u32 val32, mask, shift;
        int rc;

        rc = native_afu_cr_read32(afu, cr, aligned_off, &val32);
        if (rc)
                return rc;
        shift = (off & 0x3) * 8;
        mask = 0xff << shift;
        val32 = (val32 & ~mask) | (in << shift);

        rc = native_afu_cr_write32(afu, cr, aligned_off, val32);
        return rc;
}
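
/*
 * The sub-word writes above are read-modify-write on the containing 32-bit
 * word: read it, clear the target byte lanes, OR in the new value and write
 * it back. The WARN_ON(shift == 24) in the 16-bit variant flags a halfword
 * at offset 3 within a word, which would straddle two 32-bit words and so
 * cannot be handled by a single read-modify-write.
 */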

const struct cxl_backend_ops cxl_native_ops = {
        .module = THIS_MODULE,
        .adapter_reset = cxl_pci_reset,
        .alloc_one_irq = cxl_pci_alloc_one_irq,
        .release_one_irq = cxl_pci_release_one_irq,
        .alloc_irq_ranges = cxl_pci_alloc_irq_ranges,
        .release_irq_ranges = cxl_pci_release_irq_ranges,
        .setup_irq = cxl_pci_setup_irq,
        .handle_psl_slice_error = native_handle_psl_slice_error,
        .psl_interrupt = NULL,
        .ack_irq = native_ack_irq,
        .irq_wait = native_irq_wait,
        .attach_process = native_attach_process,
        .detach_process = native_detach_process,
        .update_ivtes = native_update_ivtes,
        .support_attributes = native_support_attributes,
        .link_ok = cxl_adapter_link_ok,
        .release_afu = cxl_pci_release_afu,
        .afu_read_err_buffer = cxl_pci_afu_read_err_buffer,
        .afu_check_and_enable = native_afu_check_and_enable,
        .afu_activate_mode = native_afu_activate_mode,
        .afu_deactivate_mode = native_afu_deactivate_mode,
        .afu_reset = native_afu_reset,
        .afu_cr_read8 = native_afu_cr_read8,
        .afu_cr_read16 = native_afu_cr_read16,
        .afu_cr_read32 = native_afu_cr_read32,
        .afu_cr_read64 = native_afu_cr_read64,
        .afu_cr_write8 = native_afu_cr_write8,
        .afu_cr_write16 = native_afu_cr_write16,
        .afu_cr_write32 = native_afu_cr_write32,
        .read_adapter_vpd = cxl_pci_read_adapter_vpd,
};