2 * IBM eServer eHCA Infiniband device driver for Linux on POWER
4 * Firmware Infiniband Interface code for POWER
12 * Copyright (c) 2005 IBM Corporation
14 * All rights reserved.
16 * This source code is distributed under a dual license of GPL v2.0 and OpenIB
21 * Redistribution and use in source and binary forms, with or without
22 * modification, are permitted provided that the following conditions are met:
24 * Redistributions of source code must retain the above copyright notice, this
25 * list of conditions and the following disclaimer.
27 * Redistributions in binary form must reproduce the above copyright notice,
28 * this list of conditions and the following disclaimer in the documentation
29 * and/or other materials
30 * provided with the distribution.
32 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
33 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
34 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
35 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
36 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
37 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
38 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
39 * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
40 * IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
41 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
42 * POSSIBILITY OF SUCH DAMAGE.
45 #include <asm/hvcall.h>
46 #include "ehca_tools.h"
50 #include "ipz_pt_fn.h"
/*
 * Bit-field layouts for the parameter/return registers of the eHCA
 * hCalls below.  EHCA_BMASK_IBM(from, to) comes from ehca_tools.h and
 * uses IBM bit numbering (bit 0 = most significant) -- see that header.
 */
/* allocate-controls register (r5) of H_ALLOC_RESOURCE for a QP */
52 #define H_ALL_RES_QP_ENHANCED_OPS EHCA_BMASK_IBM(9, 11)
53 #define H_ALL_RES_QP_PTE_PIN EHCA_BMASK_IBM(12, 12)
54 #define H_ALL_RES_QP_SERVICE_TYPE EHCA_BMASK_IBM(13, 15)
55 #define H_ALL_RES_QP_STORAGE EHCA_BMASK_IBM(16, 17)
56 #define H_ALL_RES_QP_LL_RQ_CQE_POSTING EHCA_BMASK_IBM(18, 18)
57 #define H_ALL_RES_QP_LL_SQ_CQE_POSTING EHCA_BMASK_IBM(19, 21)
58 #define H_ALL_RES_QP_SIGNALING_TYPE EHCA_BMASK_IBM(22, 23)
59 #define H_ALL_RES_QP_UD_AV_LKEY_CTRL EHCA_BMASK_IBM(31, 31)
60 #define H_ALL_RES_QP_SMALL_SQ_PAGE_SIZE EHCA_BMASK_IBM(32, 35)
61 #define H_ALL_RES_QP_SMALL_RQ_PAGE_SIZE EHCA_BMASK_IBM(36, 39)
62 #define H_ALL_RES_QP_RESOURCE_TYPE EHCA_BMASK_IBM(56, 63)
/* requested work-request / SGE maxima packed into one input register */
64 #define H_ALL_RES_QP_MAX_OUTST_SEND_WR EHCA_BMASK_IBM(0, 15)
65 #define H_ALL_RES_QP_MAX_OUTST_RECV_WR EHCA_BMASK_IBM(16, 31)
66 #define H_ALL_RES_QP_MAX_SEND_SGE EHCA_BMASK_IBM(32, 39)
67 #define H_ALL_RES_QP_MAX_RECV_SGE EHCA_BMASK_IBM(40, 47)
/* SRQ-related input fields */
69 #define H_ALL_RES_QP_UD_AV_LKEY EHCA_BMASK_IBM(32, 63)
70 #define H_ALL_RES_QP_SRQ_QP_TOKEN EHCA_BMASK_IBM(0, 31)
/*
 * NOTE(review): (0, 64) spans 65 bit positions, unlike every sibling
 * mask which stays within 0..63 -- confirm against the firmware spec.
 */
71 #define H_ALL_RES_QP_SRQ_QP_HANDLE EHCA_BMASK_IBM(0, 64)
72 #define H_ALL_RES_QP_SRQ_LIMIT EHCA_BMASK_IBM(48, 63)
73 #define H_ALL_RES_QP_SRQ_QPN EHCA_BMASK_IBM(40, 63)
/* actual (firmware-granted) values unpacked from the output registers */
75 #define H_ALL_RES_QP_ACT_OUTST_SEND_WR EHCA_BMASK_IBM(16, 31)
76 #define H_ALL_RES_QP_ACT_OUTST_RECV_WR EHCA_BMASK_IBM(48, 63)
77 #define H_ALL_RES_QP_ACT_SEND_SGE EHCA_BMASK_IBM(8, 15)
78 #define H_ALL_RES_QP_ACT_RECV_SGE EHCA_BMASK_IBM(24, 31)
80 #define H_ALL_RES_QP_SQUEUE_SIZE_PAGES EHCA_BMASK_IBM(0, 31)
81 #define H_ALL_RES_QP_RQUEUE_SIZE_PAGES EHCA_BMASK_IBM(32, 63)
/* H_MODIFY_PORT attribute bits */
83 #define H_MP_INIT_TYPE EHCA_BMASK_IBM(44, 47)
84 #define H_MP_SHUTDOWN EHCA_BMASK_IBM(48, 48)
85 #define H_MP_RESET_QKEY_CTR EHCA_BMASK_IBM(49, 49)
/* printf formats for dumping the hCall argument registers r4..r12 */
87 #define HCALL4_REGS_FORMAT "r4=%lx r5=%lx r6=%lx r7=%lx"
88 #define HCALL7_REGS_FORMAT HCALL4_REGS_FORMAT " r8=%lx r9=%lx r10=%lx"
89 #define HCALL9_REGS_FORMAT HCALL7_REGS_FORMAT " r11=%lx r12=%lx"
/* serializes all hCalls when ehca_lock_hcalls is set (firmware workaround) */
91 static DEFINE_SPINLOCK(hcall_lock);
/*
 * ehca_plpar_hcall_norets() - wrapper around plpar_hcall_norets().
 *
 * Issues the hCall up to 5 times: when firmware returns a long-busy
 * code, sleeps the hinted number of msecs and retries.  When the
 * ehca_lock_hcalls flag is set, the call itself is serialized with
 * hcall_lock to work around a firmware issue.  Debug logging of the
 * argument registers is gated on ehca_debug_level >= 2.
 */
93 static long ehca_plpar_hcall_norets(unsigned long opcode,
104 unsigned long flags = 0;
106 if (unlikely(ehca_debug_level >= 2))
107 ehca_gen_dbg("opcode=%lx " HCALL7_REGS_FORMAT,
108 opcode, arg1, arg2, arg3, arg4, arg5, arg6, arg7);
110 for (i = 0; i < 5; i++) {
111 /* serialize hCalls to work around firmware issue */
112 if (ehca_lock_hcalls)
113 spin_lock_irqsave(&hcall_lock, flags);
115 ret = plpar_hcall_norets(opcode, arg1, arg2, arg3, arg4,
118 if (ehca_lock_hcalls)
119 spin_unlock_irqrestore(&hcall_lock, flags);
/* long-busy: firmware asks us to back off before retrying */
121 if (H_IS_LONG_BUSY(ret)) {
122 sleep_msecs = get_longbusy_msecs(ret);
123 msleep_interruptible(sleep_msecs);
/* error path: dump opcode, return code and all argument registers */
128 ehca_gen_err("opcode=%lx ret=%li " HCALL7_REGS_FORMAT,
129 opcode, ret, arg1, arg2, arg3,
130 arg4, arg5, arg6, arg7);
132 if (unlikely(ehca_debug_level >= 2))
133 ehca_gen_dbg("opcode=%lx ret=%li", opcode, ret);
/*
 * ehca_plpar_hcall9() - wrapper around the 9-output plpar_hcall9().
 *
 * Same retry/serialization policy as ehca_plpar_hcall_norets(): up to
 * 5 attempts with a long-busy back-off sleep in between, optionally
 * serialized via hcall_lock.  On ret < H_SUCCESS both the inputs and
 * the 9 output registers are dumped via ehca_gen_err().
 */
141 static long ehca_plpar_hcall9(unsigned long opcode,
142 unsigned long *outs, /* array of 9 outputs */
155 unsigned long flags = 0;
157 if (unlikely(ehca_debug_level >= 2))
158 ehca_gen_dbg("INPUT -- opcode=%lx " HCALL9_REGS_FORMAT, opcode,
159 arg1, arg2, arg3, arg4, arg5,
160 arg6, arg7, arg8, arg9);
162 for (i = 0; i < 5; i++) {
163 /* serialize hCalls to work around firmware issue */
164 if (ehca_lock_hcalls)
165 spin_lock_irqsave(&hcall_lock, flags);
167 ret = plpar_hcall9(opcode, outs,
168 arg1, arg2, arg3, arg4, arg5,
169 arg6, arg7, arg8, arg9);
171 if (ehca_lock_hcalls)
172 spin_unlock_irqrestore(&hcall_lock, flags);
/* long-busy: back off for the firmware-hinted interval, then retry */
174 if (H_IS_LONG_BUSY(ret)) {
175 sleep_msecs = get_longbusy_msecs(ret);
176 msleep_interruptible(sleep_msecs);
180 if (ret < H_SUCCESS) {
181 ehca_gen_err("INPUT -- opcode=%lx " HCALL9_REGS_FORMAT,
182 opcode, arg1, arg2, arg3, arg4, arg5,
183 arg6, arg7, arg8, arg9);
184 ehca_gen_err("OUTPUT -- ret=%li " HCALL9_REGS_FORMAT,
185 ret, outs[0], outs[1], outs[2], outs[3],
186 outs[4], outs[5], outs[6], outs[7],
188 } else if (unlikely(ehca_debug_level >= 2))
189 ehca_gen_dbg("OUTPUT -- ret=%li " HCALL9_REGS_FORMAT,
190 ret, outs[0], outs[1], outs[2], outs[3],
191 outs[4], outs[5], outs[6], outs[7],
/*
 * hipz_h_alloc_resource_eq() - allocate an event queue (EQ) via
 * H_ALLOC_RESOURCE and return its handle, granted entry count, page
 * count and interrupt source token through the out parameters.
 */
199 u64 hipz_h_alloc_resource_eq(const struct ipz_adapter_handle adapter_handle,
200 struct ehca_pfeq *pfeq,
201 const u32 neq_control,
202 const u32 number_of_entries,
203 struct ipz_eq_handle *eq_handle,
204 u32 *act_nr_of_entries,
209 unsigned long outs[PLPAR_HCALL9_BUFSIZE];
210 u64 allocate_controls;
/* resource type: base value 3 selects EQ allocation -- TODO confirm */
213 allocate_controls = 3ULL;
215 /* ISN is associated */
216 if (neq_control != 1)
217 allocate_controls = (1ULL << (63 - 7)) | allocate_controls;
218 else /* notification event queue */
219 allocate_controls = (1ULL << 63) | allocate_controls;
221 ret = ehca_plpar_hcall9(H_ALLOC_RESOURCE, outs,
222 adapter_handle.handle, /* r4 */
223 allocate_controls, /* r5 */
224 number_of_entries, /* r6 */
/* unpack firmware-granted values from the output registers */
226 eq_handle->handle = outs[0];
227 *act_nr_of_entries = (u32)outs[3];
228 *act_pages = (u32)outs[4];
229 *eq_ist = (u32)outs[5];
231 if (ret == H_NOT_ENOUGH_RESOURCES)
232 ehca_gen_err("Not enough resource - ret=%lli ", ret);
/*
 * hipz_h_reset_event() - re-arm event reporting for the given EQ by
 * issuing H_RESET_EVENTS with the supplied event mask.
 */
237 u64 hipz_h_reset_event(const struct ipz_adapter_handle adapter_handle,
238 struct ipz_eq_handle eq_handle,
239 const u64 event_mask)
241 return ehca_plpar_hcall_norets(H_RESET_EVENTS,
242 adapter_handle.handle, /* r4 */
243 eq_handle.handle, /* r5 */
/*
 * hipz_h_alloc_resource_cq() - allocate a completion queue (CQ) via
 * H_ALLOC_RESOURCE.  On success the galpa access regions are mapped;
 * if that mapping fails the freshly allocated CQ is freed again so no
 * firmware resource leaks.
 */
248 u64 hipz_h_alloc_resource_cq(const struct ipz_adapter_handle adapter_handle,
250 struct ehca_alloc_cq_parms *param)
254 unsigned long outs[PLPAR_HCALL9_BUFSIZE];
256 ret = ehca_plpar_hcall9(H_ALLOC_RESOURCE, outs,
257 adapter_handle.handle, /* r4 */
259 param->eq_handle.handle, /* r6 */
261 param->nr_cqe, /* r8 */
/* unpack handle and firmware-granted sizes */
263 cq->ipz_cq_handle.handle = outs[0];
264 param->act_nr_of_entries = (u32)outs[3];
265 param->act_pages = (u32)outs[4];
267 if (ret == H_SUCCESS) {
268 rc = hcp_galpas_ctor(&cq->galpas, 0, outs[5], outs[6]);
270 ehca_gen_err("Could not establish HW access. rc=%d paddr=%#lx",
/* undo the allocation -- galpa mapping failed */
273 ehca_plpar_hcall_norets(H_FREE_RESOURCE,
274 adapter_handle.handle, /* r4 */
275 cq->ipz_cq_handle.handle, /* r5 */
281 if (ret == H_NOT_ENOUGH_RESOURCES)
282 ehca_gen_err("Not enough resources. ret=%lli", ret);
/*
 * hipz_h_alloc_resource_qp() - allocate a QP (or SRQ, depending on
 * parms->ext_type) via H_ALLOC_RESOURCE.
 *
 * Packs the request into the r5/r10/r11/r12 registers with the
 * H_ALL_RES_QP_* bit masks, then unpacks the firmware-granted queue
 * sizes back into parms.  On success the galpa regions are mapped;
 * on mapping failure the QP is freed again.
 */
287 u64 hipz_h_alloc_resource_qp(const struct ipz_adapter_handle adapter_handle,
288 struct ehca_alloc_qp_parms *parms, int is_user)
292 u64 allocate_controls, max_r10_reg, r11, r12;
293 unsigned long outs[PLPAR_HCALL9_BUFSIZE];
/* build the allocate-controls register (r5) from the request */
296 EHCA_BMASK_SET(H_ALL_RES_QP_ENHANCED_OPS, parms->ext_type)
297 | EHCA_BMASK_SET(H_ALL_RES_QP_PTE_PIN, 0)
298 | EHCA_BMASK_SET(H_ALL_RES_QP_SERVICE_TYPE, parms->servicetype)
299 | EHCA_BMASK_SET(H_ALL_RES_QP_SIGNALING_TYPE, parms->sigtype)
300 | EHCA_BMASK_SET(H_ALL_RES_QP_STORAGE, parms->qp_storage)
301 | EHCA_BMASK_SET(H_ALL_RES_QP_SMALL_SQ_PAGE_SIZE,
302 parms->squeue.page_size)
303 | EHCA_BMASK_SET(H_ALL_RES_QP_SMALL_RQ_PAGE_SIZE,
304 parms->rqueue.page_size)
305 | EHCA_BMASK_SET(H_ALL_RES_QP_LL_RQ_CQE_POSTING,
306 !!(parms->ll_comp_flags & LLQP_RECV_COMP))
307 | EHCA_BMASK_SET(H_ALL_RES_QP_LL_SQ_CQE_POSTING,
308 !!(parms->ll_comp_flags & LLQP_SEND_COMP))
309 | EHCA_BMASK_SET(H_ALL_RES_QP_UD_AV_LKEY_CTRL,
310 parms->ud_av_l_key_ctl)
311 | EHCA_BMASK_SET(H_ALL_RES_QP_RESOURCE_TYPE, 1);
/* requested WR/SGE maxima; +1 presumably reserves a slot -- confirm */
314 EHCA_BMASK_SET(H_ALL_RES_QP_MAX_OUTST_SEND_WR,
315 parms->squeue.max_wr + 1)
316 | EHCA_BMASK_SET(H_ALL_RES_QP_MAX_OUTST_RECV_WR,
317 parms->rqueue.max_wr + 1)
318 | EHCA_BMASK_SET(H_ALL_RES_QP_MAX_SEND_SGE,
319 parms->squeue.max_sge)
320 | EHCA_BMASK_SET(H_ALL_RES_QP_MAX_RECV_SGE,
321 parms->rqueue.max_sge);
323 r11 = EHCA_BMASK_SET(H_ALL_RES_QP_SRQ_QP_TOKEN, parms->srq_token);
/* r12 carries the SRQ limit when allocating an SRQ, else the SRQ QPN */
325 if (parms->ext_type == EQPT_SRQ)
326 r12 = EHCA_BMASK_SET(H_ALL_RES_QP_SRQ_LIMIT, parms->srq_limit);
328 r12 = EHCA_BMASK_SET(H_ALL_RES_QP_SRQ_QPN, parms->srq_qpn);
330 ret = ehca_plpar_hcall9(H_ALLOC_RESOURCE, outs,
331 adapter_handle.handle, /* r4 */
332 allocate_controls, /* r5 */
333 parms->send_cq_handle.handle,
334 parms->recv_cq_handle.handle,
335 parms->eq_handle.handle,
336 ((u64)parms->token << 32) | parms->pd.value,
337 max_r10_reg, r11, r12);
/* unpack handle, real QP number and the granted queue geometry */
339 parms->qp_handle.handle = outs[0];
340 parms->real_qp_num = (u32)outs[1];
341 parms->squeue.act_nr_wqes =
342 (u16)EHCA_BMASK_GET(H_ALL_RES_QP_ACT_OUTST_SEND_WR, outs[2]);
343 parms->rqueue.act_nr_wqes =
344 (u16)EHCA_BMASK_GET(H_ALL_RES_QP_ACT_OUTST_RECV_WR, outs[2]);
345 parms->squeue.act_nr_sges =
346 (u8)EHCA_BMASK_GET(H_ALL_RES_QP_ACT_SEND_SGE, outs[3]);
347 parms->rqueue.act_nr_sges =
348 (u8)EHCA_BMASK_GET(H_ALL_RES_QP_ACT_RECV_SGE, outs[3]);
349 parms->squeue.queue_size =
350 (u32)EHCA_BMASK_GET(H_ALL_RES_QP_SQUEUE_SIZE_PAGES, outs[4]);
351 parms->rqueue.queue_size =
352 (u32)EHCA_BMASK_GET(H_ALL_RES_QP_RQUEUE_SIZE_PAGES, outs[4]);
354 if (ret == H_SUCCESS) {
/*
 * NOTE(review): outs[6] is passed for BOTH galpa arguments here,
 * while the CQ path uses outs[5], outs[6] -- confirm intentional.
 */
355 rc = hcp_galpas_ctor(&parms->galpas, is_user, outs[6], outs[6]);
357 ehca_gen_err("Could not establish HW access. rc=%d paddr=%#lx",
/* undo the allocation -- galpa mapping failed */
360 ehca_plpar_hcall_norets(H_FREE_RESOURCE,
361 adapter_handle.handle, /* r4 */
362 parms->qp_handle.handle, /* r5 */
368 if (ret == H_NOT_ENOUGH_RESOURCES)
369 ehca_gen_err("Not enough resources. ret=%lli", ret);
/*
 * hipz_h_query_port() - fetch port attributes via H_QUERY_PORT into
 * the caller-supplied response block, which must be page aligned
 * because its physical address is handed to firmware.
 */
374 u64 hipz_h_query_port(const struct ipz_adapter_handle adapter_handle,
376 struct hipz_query_port *query_port_response_block)
379 u64 r_cb = __pa(query_port_response_block);
/* firmware writes the block by physical address: must be page aligned */
381 if (r_cb & (EHCA_PAGESIZE-1)) {
382 ehca_gen_err("response block not page aligned");
386 ret = ehca_plpar_hcall_norets(H_QUERY_PORT,
387 adapter_handle.handle, /* r4 */
392 if (ehca_debug_level >= 2)
393 ehca_dmp(query_port_response_block, 64, "response_block");
/*
 * hipz_h_modify_port() - change port attributes via H_MODIFY_PORT.
 * Translates the IB_PORT_* modify-mask bits into the H_MP_* bit
 * fields of the port_attributes register.
 */
398 u64 hipz_h_modify_port(const struct ipz_adapter_handle adapter_handle,
399 const u8 port_id, const u32 port_cap,
400 const u8 init_type, const int modify_mask)
402 u64 port_attributes = port_cap;
404 if (modify_mask & IB_PORT_SHUTDOWN)
405 port_attributes |= EHCA_BMASK_SET(H_MP_SHUTDOWN, 1);
406 if (modify_mask & IB_PORT_INIT_TYPE)
407 port_attributes |= EHCA_BMASK_SET(H_MP_INIT_TYPE, init_type);
408 if (modify_mask & IB_PORT_RESET_QKEY_CNTR)
409 port_attributes |= EHCA_BMASK_SET(H_MP_RESET_QKEY_CTR, 1);
411 return ehca_plpar_hcall_norets(H_MODIFY_PORT,
412 adapter_handle.handle, /* r4 */
414 port_attributes, /* r6 */
/*
 * hipz_h_query_hca() - fetch HCA attributes via H_QUERY_HCA into the
 * caller's response block (page aligned, passed by physical address).
 */
418 u64 hipz_h_query_hca(const struct ipz_adapter_handle adapter_handle,
419 struct hipz_query_hca *query_hca_rblock)
421 u64 r_cb = __pa(query_hca_rblock);
/* firmware writes the block by physical address: must be page aligned */
423 if (r_cb & (EHCA_PAGESIZE-1)) {
424 ehca_gen_err("response_block=%p not page aligned",
429 return ehca_plpar_hcall_norets(H_QUERY_HCA,
430 adapter_handle.handle, /* r4 */
/*
 * hipz_h_register_rpage() - common H_REGISTER_RPAGES helper used by
 * the EQ/CQ/QP/MR variants below.  queue_type and pagesize are packed
 * into r5 (pagesize shifted into bits 8+).
 */
435 u64 hipz_h_register_rpage(const struct ipz_adapter_handle adapter_handle,
438 const u64 resource_handle,
439 const u64 logical_address_of_page,
442 return ehca_plpar_hcall_norets(H_REGISTER_RPAGES,
443 adapter_handle.handle, /* r4 */
444 (u64)queue_type | ((u64)pagesize) << 8,
446 resource_handle, /* r6 */
447 logical_address_of_page, /* r7 */
/*
 * hipz_h_register_rpage_eq() - register queue pages for an EQ; thin
 * wrapper that validates the page count and forwards to
 * hipz_h_register_rpage().
 */
452 u64 hipz_h_register_rpage_eq(const struct ipz_adapter_handle adapter_handle,
453 const struct ipz_eq_handle eq_handle,
454 struct ehca_pfeq *pfeq,
457 const u64 logical_address_of_page,
/* invalid page count -- reject before touching firmware */
461 ehca_gen_err("Ppage counter=%llx", count);
464 return hipz_h_register_rpage(adapter_handle,
468 logical_address_of_page, count);
/*
 * hipz_h_query_int_state() - query the interrupt state via
 * H_QUERY_INT_STATE; H_BUSY is tolerated alongside H_SUCCESS.
 */
471 u64 hipz_h_query_int_state(const struct ipz_adapter_handle adapter_handle,
475 ret = ehca_plpar_hcall_norets(H_QUERY_INT_STATE,
476 adapter_handle.handle, /* r4 */
480 if (ret != H_SUCCESS && ret != H_BUSY)
481 ehca_gen_err("Could not query interrupt state.");
/*
 * hipz_h_register_rpage_cq() - register queue pages for a CQ;
 * validates the page count then forwards to hipz_h_register_rpage().
 */
486 u64 hipz_h_register_rpage_cq(const struct ipz_adapter_handle adapter_handle,
487 const struct ipz_cq_handle cq_handle,
488 struct ehca_pfcq *pfcq,
491 const u64 logical_address_of_page,
493 const struct h_galpa gal)
/* invalid page count -- reject before touching firmware */
496 ehca_gen_err("Page counter=%llx", count);
500 return hipz_h_register_rpage(adapter_handle, pagesize, queue_type,
501 cq_handle.handle, logical_address_of_page,
/*
 * hipz_h_register_rpage_qp() - register queue pages for a QP;
 * validates the page count then forwards to hipz_h_register_rpage().
 */
505 u64 hipz_h_register_rpage_qp(const struct ipz_adapter_handle adapter_handle,
506 const struct ipz_qp_handle qp_handle,
507 struct ehca_pfqp *pfqp,
510 const u64 logical_address_of_page,
512 const struct h_galpa galpa)
/* invalid page count -- reject before touching firmware */
515 ehca_gen_err("Page counter=%llx", count);
519 return hipz_h_register_rpage(adapter_handle, pagesize, queue_type,
520 qp_handle.handle, logical_address_of_page,
/*
 * hipz_h_disable_and_get_wqe() - issue H_DISABLE_AND_GETC for a QP and
 * optionally return the logical addresses of the next send/receive
 * WQEs to be processed (outs[0]/outs[1]).  NULL out-pointers skip the
 * corresponding write-back.
 */
524 u64 hipz_h_disable_and_get_wqe(const struct ipz_adapter_handle adapter_handle,
525 const struct ipz_qp_handle qp_handle,
526 struct ehca_pfqp *pfqp,
527 void **log_addr_next_sq_wqe2processed,
528 void **log_addr_next_rq_wqe2processed,
529 int dis_and_get_function_code)
532 unsigned long outs[PLPAR_HCALL9_BUFSIZE];
534 ret = ehca_plpar_hcall9(H_DISABLE_AND_GETC, outs,
535 adapter_handle.handle, /* r4 */
536 dis_and_get_function_code, /* r5 */
537 qp_handle.handle, /* r6 */
539 if (log_addr_next_sq_wqe2processed)
540 *log_addr_next_sq_wqe2processed = (void *)outs[0];
541 if (log_addr_next_rq_wqe2processed)
542 *log_addr_next_rq_wqe2processed = (void *)outs[1];
/*
 * hipz_h_modify_qp() - modify QP attributes via H_MODIFY_QP.  The
 * control block is passed to firmware by physical address; update_mask
 * selects which fields of mqpcb firmware should apply.
 */
547 u64 hipz_h_modify_qp(const struct ipz_adapter_handle adapter_handle,
548 const struct ipz_qp_handle qp_handle,
549 struct ehca_pfqp *pfqp,
550 const u64 update_mask,
551 struct hcp_modify_qp_control_block *mqpcb,
555 unsigned long outs[PLPAR_HCALL9_BUFSIZE];
556 ret = ehca_plpar_hcall9(H_MODIFY_QP, outs,
557 adapter_handle.handle, /* r4 */
558 qp_handle.handle, /* r5 */
559 update_mask, /* r6 */
560 __pa(mqpcb), /* r7 */
563 if (ret == H_NOT_ENOUGH_RESOURCES)
564 ehca_gen_err("Insufficient resources ret=%lli", ret);
/*
 * hipz_h_query_qp() - read QP attributes via H_QUERY_QP into the
 * caller-supplied control block (passed by physical address).
 */
569 u64 hipz_h_query_qp(const struct ipz_adapter_handle adapter_handle,
570 const struct ipz_qp_handle qp_handle,
571 struct ehca_pfqp *pfqp,
572 struct hcp_modify_qp_control_block *qqpcb,
575 return ehca_plpar_hcall_norets(H_QUERY_QP,
576 adapter_handle.handle, /* r4 */
577 qp_handle.handle, /* r5 */
578 __pa(qqpcb), /* r6 */
/*
 * hipz_h_destroy_qp() - tear down a QP: unmap its galpa regions,
 * disable it with H_DISABLE_AND_GETC, then release the firmware
 * resource with H_FREE_RESOURCE.
 */
582 u64 hipz_h_destroy_qp(const struct ipz_adapter_handle adapter_handle,
586 unsigned long outs[PLPAR_HCALL9_BUFSIZE];
/* step 1: unmap the HW access regions */
588 ret = hcp_galpas_dtor(&qp->galpas);
590 ehca_gen_err("Could not destruct qp->galpas");
/* step 2: disable the QP before freeing it */
593 ret = ehca_plpar_hcall9(H_DISABLE_AND_GETC, outs,
594 adapter_handle.handle, /* r4 */
597 qp->ipz_qp_handle.handle, /* r6 */
599 if (ret == H_HARDWARE)
600 ehca_gen_err("HCA not operational. ret=%lli", ret);
/* step 3: release the firmware resource */
602 ret = ehca_plpar_hcall_norets(H_FREE_RESOURCE,
603 adapter_handle.handle, /* r4 */
604 qp->ipz_qp_handle.handle, /* r5 */
607 if (ret == H_RESOURCE)
608 ehca_gen_err("Resource still in use. ret=%lli", ret);
/*
 * hipz_h_define_aqp0() - bind a QP as alias QP0 (SMI) for a port via
 * H_DEFINE_AQP0.
 */
613 u64 hipz_h_define_aqp0(const struct ipz_adapter_handle adapter_handle,
614 const struct ipz_qp_handle qp_handle,
618 return ehca_plpar_hcall_norets(H_DEFINE_AQP0,
619 adapter_handle.handle, /* r4 */
620 qp_handle.handle, /* r5 */
/*
 * hipz_h_define_aqp1() - bind a QP as alias QP1 (GSI) for a port via
 * H_DEFINE_AQP1; returns the PMA and BMA QP numbers through the out
 * parameters.  H_ALIAS_EXIST means AQP1 was already defined.
 */
625 u64 hipz_h_define_aqp1(const struct ipz_adapter_handle adapter_handle,
626 const struct ipz_qp_handle qp_handle,
628 u32 port, u32 * pma_qp_nr,
632 unsigned long outs[PLPAR_HCALL9_BUFSIZE];
634 ret = ehca_plpar_hcall9(H_DEFINE_AQP1, outs,
635 adapter_handle.handle, /* r4 */
636 qp_handle.handle, /* r5 */
/* firmware returns the management QP numbers in outs[0]/outs[1] */
639 *pma_qp_nr = (u32)outs[0];
640 *bma_qp_nr = (u32)outs[1];
642 if (ret == H_ALIAS_EXIST)
643 ehca_gen_err("AQP1 already exists. ret=%lli", ret);
/*
 * hipz_h_attach_mcqp() - attach a QP to a multicast group (identified
 * by GID = subnet_prefix + interface_id) via H_ATTACH_MCQP.
 */
648 u64 hipz_h_attach_mcqp(const struct ipz_adapter_handle adapter_handle,
649 const struct ipz_qp_handle qp_handle,
652 u64 subnet_prefix, u64 interface_id)
656 ret = ehca_plpar_hcall_norets(H_ATTACH_MCQP,
657 adapter_handle.handle, /* r4 */
658 qp_handle.handle, /* r5 */
660 interface_id, /* r7 */
661 subnet_prefix, /* r8 */
664 if (ret == H_NOT_ENOUGH_RESOURCES)
665 ehca_gen_err("Not enough resources. ret=%lli", ret);
/*
 * hipz_h_detach_mcqp() - detach a QP from a multicast group via
 * H_DETACH_MCQP (mirror of hipz_h_attach_mcqp()).
 */
670 u64 hipz_h_detach_mcqp(const struct ipz_adapter_handle adapter_handle,
671 const struct ipz_qp_handle qp_handle,
674 u64 subnet_prefix, u64 interface_id)
676 return ehca_plpar_hcall_norets(H_DETACH_MCQP,
677 adapter_handle.handle, /* r4 */
678 qp_handle.handle, /* r5 */
680 interface_id, /* r7 */
681 subnet_prefix, /* r8 */
/*
 * hipz_h_destroy_cq() - tear down a CQ: unmap its galpa regions, then
 * free the firmware resource; force_flag requests a forced free (r6).
 */
685 u64 hipz_h_destroy_cq(const struct ipz_adapter_handle adapter_handle,
691 ret = hcp_galpas_dtor(&cq->galpas);
693 ehca_gen_err("Could not destruct cp->galpas");
697 ret = ehca_plpar_hcall_norets(H_FREE_RESOURCE,
698 adapter_handle.handle, /* r4 */
699 cq->ipz_cq_handle.handle, /* r5 */
700 force_flag != 0 ? 1L : 0L, /* r6 */
703 if (ret == H_RESOURCE)
704 ehca_gen_err("H_FREE_RESOURCE failed ret=%lli ", ret);
/*
 * hipz_h_destroy_eq() - tear down an EQ: unmap its galpa regions, then
 * free the firmware resource via H_FREE_RESOURCE.
 */
709 u64 hipz_h_destroy_eq(const struct ipz_adapter_handle adapter_handle,
714 ret = hcp_galpas_dtor(&eq->galpas);
716 ehca_gen_err("Could not destruct eq->galpas");
720 ret = ehca_plpar_hcall_norets(H_FREE_RESOURCE,
721 adapter_handle.handle, /* r4 */
722 eq->ipz_eq_handle.handle, /* r5 */
725 if (ret == H_RESOURCE)
726 ehca_gen_err("Resource in use. ret=%lli ", ret);
/*
 * hipz_h_alloc_resource_mr() - allocate a memory region (MR) via
 * H_ALLOC_RESOURCE; returns handle, lkey and rkey in outparms.
 * access_ctrl occupies the upper 32 bits of r8.
 */
731 u64 hipz_h_alloc_resource_mr(const struct ipz_adapter_handle adapter_handle,
732 const struct ehca_mr *mr,
735 const u32 access_ctrl,
736 const struct ipz_pd pd,
737 struct ehca_mr_hipzout_parms *outparms)
740 unsigned long outs[PLPAR_HCALL9_BUFSIZE];
742 ret = ehca_plpar_hcall9(H_ALLOC_RESOURCE, outs,
743 adapter_handle.handle, /* r4 */
747 (((u64)access_ctrl) << 32ULL), /* r8 */
/* unpack MR handle and keys */
750 outparms->handle.handle = outs[0];
751 outparms->lkey = (u32)outs[2];
752 outparms->rkey = (u32)outs[3];
/*
 * hipz_h_register_rpage_mr() - register physical pages for an MR.
 * With multi-page counts the page-list address must be 4k aligned
 * (firmware reads a page array there); single-page registrations pass
 * the page address directly.  Forwards to hipz_h_register_rpage().
 */
757 u64 hipz_h_register_rpage_mr(const struct ipz_adapter_handle adapter_handle,
758 const struct ehca_mr *mr,
761 const u64 logical_address_of_page,
/* debug: dump the page list (or the single page address) */
766 if (unlikely(ehca_debug_level >= 3)) {
770 kpage = __va(logical_address_of_page);
771 for (i = 0; i < count; i++)
772 ehca_gen_dbg("kpage[%d]=%p",
773 i, (void *)kpage[i]);
775 ehca_gen_dbg("kpage=%p",
776 (void *)logical_address_of_page);
/* a multi-page list is read by firmware and must be page aligned */
779 if ((count > 1) && (logical_address_of_page & (EHCA_PAGESIZE-1))) {
780 ehca_gen_err("logical_address_of_page not on a 4k boundary "
781 "adapter_handle=%llx mr=%p mr_handle=%llx "
782 "pagesize=%x queue_type=%x "
783 "logical_address_of_page=%llx count=%llx",
784 adapter_handle.handle, mr,
785 mr->ipz_mr_handle.handle, pagesize, queue_type,
786 logical_address_of_page, count);
789 ret = hipz_h_register_rpage(adapter_handle, pagesize,
791 mr->ipz_mr_handle.handle,
792 logical_address_of_page, count);
/*
 * hipz_h_query_mr() - read MR attributes via H_QUERY_MR: length,
 * virtual address, access flags and the l/r keys packed in outs[5].
 */
796 u64 hipz_h_query_mr(const struct ipz_adapter_handle adapter_handle,
797 const struct ehca_mr *mr,
798 struct ehca_mr_hipzout_parms *outparms)
801 unsigned long outs[PLPAR_HCALL9_BUFSIZE];
803 ret = ehca_plpar_hcall9(H_QUERY_MR, outs,
804 adapter_handle.handle, /* r4 */
805 mr->ipz_mr_handle.handle, /* r5 */
806 0, 0, 0, 0, 0, 0, 0);
807 outparms->len = outs[0];
808 outparms->vaddr = outs[1];
809 outparms->acl = outs[4] >> 32;
/* lkey in the high half of outs[5], rkey in the low half */
810 outparms->lkey = (u32)(outs[5] >> 32);
811 outparms->rkey = (u32)(outs[5] & (0xffffffff));
/*
 * hipz_h_free_resource_mr() - release an MR's firmware resource via
 * H_FREE_RESOURCE.
 */
816 u64 hipz_h_free_resource_mr(const struct ipz_adapter_handle adapter_handle,
817 const struct ehca_mr *mr)
819 return ehca_plpar_hcall_norets(H_FREE_RESOURCE,
820 adapter_handle.handle, /* r4 */
821 mr->ipz_mr_handle.handle, /* r5 */
/*
 * hipz_h_reregister_pmr() - re-register a physical MR with new
 * attributes via H_REREGISTER_PMR; access_ctrl and pd share one
 * register (upper/lower 32 bits).  Returns new vaddr/lkey/rkey.
 */
825 u64 hipz_h_reregister_pmr(const struct ipz_adapter_handle adapter_handle,
826 const struct ehca_mr *mr,
829 const u32 access_ctrl,
830 const struct ipz_pd pd,
831 const u64 mr_addr_cb,
832 struct ehca_mr_hipzout_parms *outparms)
835 unsigned long outs[PLPAR_HCALL9_BUFSIZE];
837 ret = ehca_plpar_hcall9(H_REREGISTER_PMR, outs,
838 adapter_handle.handle, /* r4 */
839 mr->ipz_mr_handle.handle, /* r5 */
843 ((((u64)access_ctrl) << 32ULL) | pd.value),
846 outparms->vaddr = outs[1];
847 outparms->lkey = (u32)outs[2];
848 outparms->rkey = (u32)outs[3];
/*
 * hipz_h_register_smr() - register a shared MR derived from orig_mr
 * via H_REGISTER_SMR; returns the new handle and keys in outparms.
 */
853 u64 hipz_h_register_smr(const struct ipz_adapter_handle adapter_handle,
854 const struct ehca_mr *mr,
855 const struct ehca_mr *orig_mr,
857 const u32 access_ctrl,
858 const struct ipz_pd pd,
859 struct ehca_mr_hipzout_parms *outparms)
862 unsigned long outs[PLPAR_HCALL9_BUFSIZE];
864 ret = ehca_plpar_hcall9(H_REGISTER_SMR, outs,
865 adapter_handle.handle, /* r4 */
866 orig_mr->ipz_mr_handle.handle, /* r5 */
868 (((u64)access_ctrl) << 32ULL), /* r7 */
871 outparms->handle.handle = outs[0];
872 outparms->lkey = (u32)outs[2];
873 outparms->rkey = (u32)outs[3];
/*
 * hipz_h_alloc_resource_mw() - allocate a memory window (MW) via
 * H_ALLOC_RESOURCE; returns handle and rkey in outparms.
 */
878 u64 hipz_h_alloc_resource_mw(const struct ipz_adapter_handle adapter_handle,
879 const struct ehca_mw *mw,
880 const struct ipz_pd pd,
881 struct ehca_mw_hipzout_parms *outparms)
884 unsigned long outs[PLPAR_HCALL9_BUFSIZE];
886 ret = ehca_plpar_hcall9(H_ALLOC_RESOURCE, outs,
887 adapter_handle.handle, /* r4 */
891 outparms->handle.handle = outs[0];
892 outparms->rkey = (u32)outs[3];
/*
 * hipz_h_query_mw() - read MW attributes via H_QUERY_MW; only the
 * rkey (outs[3]) is returned to the caller.
 */
897 u64 hipz_h_query_mw(const struct ipz_adapter_handle adapter_handle,
898 const struct ehca_mw *mw,
899 struct ehca_mw_hipzout_parms *outparms)
902 unsigned long outs[PLPAR_HCALL9_BUFSIZE];
904 ret = ehca_plpar_hcall9(H_QUERY_MW, outs,
905 adapter_handle.handle, /* r4 */
906 mw->ipz_mw_handle.handle, /* r5 */
907 0, 0, 0, 0, 0, 0, 0);
908 outparms->rkey = (u32)outs[3];
/*
 * hipz_h_free_resource_mw() - release an MW's firmware resource via
 * H_FREE_RESOURCE.
 */
913 u64 hipz_h_free_resource_mw(const struct ipz_adapter_handle adapter_handle,
914 const struct ehca_mw *mw)
916 return ehca_plpar_hcall_norets(H_FREE_RESOURCE,
917 adapter_handle.handle, /* r4 */
918 mw->ipz_mw_handle.handle, /* r5 */
/*
 * hipz_h_error_data() - fetch error data for a resource via
 * H_ERROR_DATA into rblock (page aligned, passed by physical address).
 */
922 u64 hipz_h_error_data(const struct ipz_adapter_handle adapter_handle,
923 const u64 ressource_handle,
925 unsigned long *byte_count)
927 u64 r_cb = __pa(rblock);
/* firmware writes the block by physical address: must be page aligned */
929 if (r_cb & (EHCA_PAGESIZE-1)) {
930 ehca_gen_err("rblock not page aligned.");
934 return ehca_plpar_hcall_norets(H_ERROR_DATA,
935 adapter_handle.handle,
/*
 * hipz_h_eoi() - signal end-of-interrupt for irq via H_EOI.  The XIRR
 * value combines a priority byte (0xff, bits 24..31) with the irq
 * number.  Calls plpar_hcall_norets() directly -- no retry wrapper.
 */
941 u64 hipz_h_eoi(int irq)
946 xirr = (0xffULL << 24) | irq;
948 return plpar_hcall_norets(H_EOI, xirr);