// SPDX-License-Identifier: GPL-2.0 or BSD-3-Clause
/*
 * Copyright(c) 2020 - Cornelis Networks, Inc.
 * Copyright(c) 2015 - 2018 Intel Corporation.
 */
#include <linux/types.h>
#include <linux/device.h>
#include <linux/dmapool.h>
#include <linux/slab.h>
#include <linux/list.h>
#include <linux/highmem.h>
#include <linux/uio.h>
#include <linux/rbtree.h>
#include <linux/spinlock.h>
#include <linux/delay.h>
#include <linux/kthread.h>
#include <linux/mmu_context.h>
#include <linux/module.h>
#include <linux/vmalloc.h>
#include <linux/string.h>

#include "hfi.h"
#include "sdma.h"
#include "user_sdma.h"
#include "verbs.h"  /* for the headers */
#include "common.h" /* for struct hfi1_tid_info */
#include "trace.h"
static uint hfi1_sdma_comp_ring_size = 128;
module_param_named(sdma_comp_size, hfi1_sdma_comp_ring_size, uint, S_IRUGO);
MODULE_PARM_DESC(sdma_comp_size, "Size of User SDMA completion ring. Default: 128");

static unsigned initial_pkt_count = 8;
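/*
 * Illustrative usage: the completion ring size can be tuned at module load
 * time, e.g. "modprobe hfi1 sdma_comp_size=256". initial_pkt_count bounds how
 * many packets user_sdma_send_pkts() is asked to generate per call.
 */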
static int user_sdma_send_pkts(struct user_sdma_request *req, u16 maxpkts);
static void user_sdma_txreq_cb(struct sdma_txreq *txreq, int status);
static inline void pq_update(struct hfi1_user_sdma_pkt_q *pq);
static void user_sdma_free_request(struct user_sdma_request *req);
static int check_header_template(struct user_sdma_request *req,
				 struct hfi1_pkt_header *hdr, u32 lrhlen,
				 u32 datalen);
static int set_txreq_header(struct user_sdma_request *req,
			    struct user_sdma_txreq *tx, u32 datalen);
static int set_txreq_header_ahg(struct user_sdma_request *req,
				struct user_sdma_txreq *tx, u32 len);
static inline void set_comp_state(struct hfi1_user_sdma_pkt_q *pq,
				  struct hfi1_user_sdma_comp_q *cq,
				  u16 idx, enum hfi1_sdma_comp_state state,
				  int ret);
static inline u32 set_pkt_bth_psn(__be32 bthpsn, u8 expct, u32 frags);
static inline u32 get_lrh_len(struct hfi1_pkt_header, u32 len);

static int defer_packet_queue(
	struct sdma_engine *sde,
	struct iowait_work *wait,
	struct sdma_txreq *txreq,
	uint seq,
	bool pkts_sent);
static void activate_packet_queue(struct iowait *wait, int reason);
static bool sdma_rb_filter(struct mmu_rb_node *node, unsigned long addr,
			   unsigned long len);
static int sdma_rb_insert(void *arg, struct mmu_rb_node *mnode);
static int sdma_rb_evict(void *arg, struct mmu_rb_node *mnode,
			 void *arg2, bool *stop);
static void sdma_rb_remove(void *arg, struct mmu_rb_node *mnode);
static int sdma_rb_invalidate(void *arg, struct mmu_rb_node *mnode);

static struct mmu_rb_ops sdma_rb_ops = {
	.filter = sdma_rb_filter,
	.insert = sdma_rb_insert,
	.evict = sdma_rb_evict,
	.remove = sdma_rb_remove,
	.invalidate = sdma_rb_invalidate
};

static int add_system_pages_to_sdma_packet(struct user_sdma_request *req,
					   struct user_sdma_txreq *tx,
					   struct user_sdma_iovec *iovec,
					   u32 *pkt_data_remaining);

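/*
 * iowait sleep callback, called by the SDMA engine when a descriptor ring is
 * full: returns -EAGAIN if the engine has made progress in the meantime (the
 * caller should retry), otherwise marks the packet queue deferred, queues it
 * on the engine's dmawait list and returns -EBUSY.
 */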
static int defer_packet_queue(
	struct sdma_engine *sde,
	struct iowait_work *wait,
	struct sdma_txreq *txreq,
	uint seq,
	bool pkts_sent)
{
	struct hfi1_user_sdma_pkt_q *pq =
		container_of(wait->iow, struct hfi1_user_sdma_pkt_q, busy);

	write_seqlock(&sde->waitlock);
	trace_hfi1_usdma_defer(pq, sde, &pq->busy);
	if (sdma_progress(sde, seq, txreq))
		goto eagain;
	/*
	 * We are assuming that if the list is enqueued somewhere, it
	 * is to the dmawait list since that is the only place where
	 * it is supposed to be enqueued.
	 */
	xchg(&pq->state, SDMA_PKT_Q_DEFERRED);
	if (list_empty(&pq->busy.list)) {
		pq->busy.lock = &sde->waitlock;
		iowait_get_priority(&pq->busy);
		iowait_queue(pkts_sent, &pq->busy, &sde->dmawait);
	}
	write_sequnlock(&sde->waitlock);
	return -EBUSY;
eagain:
	write_sequnlock(&sde->waitlock);
	return -EAGAIN;
}

static void activate_packet_queue(struct iowait *wait, int reason)
{
	struct hfi1_user_sdma_pkt_q *pq =
		container_of(wait, struct hfi1_user_sdma_pkt_q, busy);

	trace_hfi1_usdma_activate(pq, wait, reason);
	xchg(&pq->state, SDMA_PKT_Q_ACTIVE);
	wake_up(&wait->wait_dma);
}

int hfi1_user_sdma_alloc_queues(struct hfi1_ctxtdata *uctxt,
				struct hfi1_filedata *fd)
{
	int ret = -ENOMEM;
	char buf[64];
	struct hfi1_devdata *dd;
	struct hfi1_user_sdma_comp_q *cq;
	struct hfi1_user_sdma_pkt_q *pq;

	if (!uctxt || !fd)
		return -EBADF;

	if (!hfi1_sdma_comp_ring_size)
		return -EINVAL;

	dd = uctxt->dd;

	pq = kzalloc(sizeof(*pq), GFP_KERNEL);
	if (!pq)
		return -ENOMEM;
	pq->dd = dd;
	pq->ctxt = uctxt->ctxt;
	pq->subctxt = fd->subctxt;
	pq->n_max_reqs = hfi1_sdma_comp_ring_size;
	atomic_set(&pq->n_reqs, 0);
	init_waitqueue_head(&pq->wait);
	atomic_set(&pq->n_locked, 0);

	iowait_init(&pq->busy, 0, NULL, NULL, defer_packet_queue,
		    activate_packet_queue, NULL, NULL);

	pq->reqs = kcalloc(hfi1_sdma_comp_ring_size,
			   sizeof(*pq->reqs),
			   GFP_KERNEL);
	if (!pq->reqs)
		goto pq_reqs_nomem;

	pq->req_in_use = bitmap_zalloc(hfi1_sdma_comp_ring_size, GFP_KERNEL);
	if (!pq->req_in_use)
		goto pq_reqs_no_in_use;

	snprintf(buf, 64, "txreq-kmem-cache-%u-%u-%u", dd->unit, uctxt->ctxt,
		 fd->subctxt);
	pq->txreq_cache = kmem_cache_create(buf,
					    sizeof(struct user_sdma_txreq),
					    L1_CACHE_BYTES,
					    SLAB_HWCACHE_ALIGN,
					    NULL);
	if (!pq->txreq_cache) {
		dd_dev_err(dd, "[%u] Failed to allocate TxReq cache\n",
			   uctxt->ctxt);
		goto pq_txreq_nomem;
	}

	cq = kzalloc(sizeof(*cq), GFP_KERNEL);
	if (!cq)
		goto cq_nomem;

	cq->comps = vmalloc_user(PAGE_ALIGN(sizeof(*cq->comps)
				 * hfi1_sdma_comp_ring_size));
	if (!cq->comps)
		goto cq_comps_nomem;

	cq->nentries = hfi1_sdma_comp_ring_size;

	ret = hfi1_mmu_rb_register(pq, &sdma_rb_ops, dd->pport->hfi1_wq,
				   &pq->handler);
	if (ret) {
		dd_dev_err(dd, "Failed to register with MMU %d", ret);
		goto pq_mmu_fail;
	}

	rcu_assign_pointer(fd->pq, pq);
	fd->cq = cq;

	return 0;

pq_mmu_fail:
	vfree(cq->comps);
cq_comps_nomem:
	kfree(cq);
cq_nomem:
	kmem_cache_destroy(pq->txreq_cache);
pq_txreq_nomem:
	bitmap_free(pq->req_in_use);
pq_reqs_no_in_use:
	kfree(pq->reqs);
pq_reqs_nomem:
	kfree(pq);

	return ret;
}

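/* Drop the packet queue from the engine dmawait list it was deferred to, if any. */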
static void flush_pq_iowait(struct hfi1_user_sdma_pkt_q *pq)
{
	unsigned long flags;
	seqlock_t *lock = pq->busy.lock;

	if (!lock)
		return;
	write_seqlock_irqsave(lock, flags);
	if (!list_empty(&pq->busy.list)) {
		list_del_init(&pq->busy.list);
		pq->busy.lock = NULL;
	}
	write_sequnlock_irqrestore(lock, flags);
}

int hfi1_user_sdma_free_queues(struct hfi1_filedata *fd,
			       struct hfi1_ctxtdata *uctxt)
{
	struct hfi1_user_sdma_pkt_q *pq;

	trace_hfi1_sdma_user_free_queues(uctxt->dd, uctxt->ctxt, fd->subctxt);

	spin_lock(&fd->pq_rcu_lock);
	pq = srcu_dereference_check(fd->pq, &fd->pq_srcu,
				    lockdep_is_held(&fd->pq_rcu_lock));
	if (pq) {
		rcu_assign_pointer(fd->pq, NULL);
		spin_unlock(&fd->pq_rcu_lock);
		synchronize_srcu(&fd->pq_srcu);
		/* at this point there can be no more new requests */
		hfi1_mmu_rb_unregister(pq->handler);
		iowait_sdma_drain(&pq->busy);
		/* Wait until all requests have been freed. */
		wait_event_interruptible(
			pq->wait,
			!atomic_read(&pq->n_reqs));
		kfree(pq->reqs);
		bitmap_free(pq->req_in_use);
		kmem_cache_destroy(pq->txreq_cache);
		flush_pq_iowait(pq);
		kfree(pq);
	} else {
		spin_unlock(&fd->pq_rcu_lock);
	}
	if (fd->cq) {
		vfree(fd->cq->comps);
		kfree(fd->cq);
		fd->cq = NULL;
	}
	return 0;
}

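/*
 * Map a DLID to a small selector used to spread requests across SDMA engines:
 * DLIDs are hashed into a 256-entry table and each new hash bucket is handed
 * the next value in a 128-value round-robin sequence.
 */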
static u8 dlid_to_selector(u16 dlid)
{
	static u8 mapping[256];
	static int initialized;
	static u8 next;
	int hash;

	if (!initialized) {
		memset(mapping, 0xFF, 256);
		initialized = 1;
	}

	hash = ((dlid >> 8) ^ dlid) & 0xFF;
	if (mapping[hash] == 0xFF) {
		mapping[hash] = next;
		next = (next + 1) & 0x7F;
	}

	return mapping[hash];
}

/**
 * hfi1_user_sdma_process_request() - Process and start a user sdma request
 * @fd: valid file descriptor
 * @iovec: array of io vectors to process
 * @dim: overall iovec array size
 * @count: number of io vector array entries processed
 */
int hfi1_user_sdma_process_request(struct hfi1_filedata *fd,
				   struct iovec *iovec, unsigned long dim,
				   unsigned long *count)
{
	int ret = 0, i;
	struct hfi1_ctxtdata *uctxt = fd->uctxt;
	struct hfi1_user_sdma_pkt_q *pq =
		srcu_dereference(fd->pq, &fd->pq_srcu);
	struct hfi1_user_sdma_comp_q *cq = fd->cq;
	struct hfi1_devdata *dd = pq->dd;
	unsigned long idx = 0;
	u8 pcount = initial_pkt_count;
	struct sdma_req_info info;
	struct user_sdma_request *req;
	u8 opcode, sc, vl;
	u16 pkey;
	u32 slid;
	u16 dlid;
	u32 selector;

	if (iovec[idx].iov_len < sizeof(info) + sizeof(req->hdr)) {
		hfi1_cdbg(
		   SDMA,
		   "[%u:%u:%u] First vector not big enough for header %lu/%lu",
		   dd->unit, uctxt->ctxt, fd->subctxt,
		   iovec[idx].iov_len, sizeof(info) + sizeof(req->hdr));
		return -EINVAL;
	}
	ret = copy_from_user(&info, iovec[idx].iov_base, sizeof(info));
	if (ret) {
		hfi1_cdbg(SDMA, "[%u:%u:%u] Failed to copy info QW (%d)",
			  dd->unit, uctxt->ctxt, fd->subctxt, ret);
		return -EFAULT;
	}

	trace_hfi1_sdma_user_reqinfo(dd, uctxt->ctxt, fd->subctxt,
				     (u16 *)&info);
	if (info.comp_idx >= hfi1_sdma_comp_ring_size) {
		hfi1_cdbg(SDMA,
			  "[%u:%u:%u:%u] Invalid comp index",
			  dd->unit, uctxt->ctxt, fd->subctxt, info.comp_idx);
		return -EINVAL;
	}

	/*
	 * Sanity check the header io vector count. Need at least 1 vector
	 * (header) and cannot be larger than the actual io vector count.
	 */
	if (req_iovcnt(info.ctrl) < 1 || req_iovcnt(info.ctrl) > dim) {
		hfi1_cdbg(SDMA,
			  "[%u:%u:%u:%u] Invalid iov count %d, dim %ld",
			  dd->unit, uctxt->ctxt, fd->subctxt, info.comp_idx,
			  req_iovcnt(info.ctrl), dim);
		return -EINVAL;
	}

	if (!info.fragsize) {
		hfi1_cdbg(SDMA,
			  "[%u:%u:%u:%u] Request does not specify fragsize",
			  dd->unit, uctxt->ctxt, fd->subctxt, info.comp_idx);
		return -EINVAL;
	}

	/* Try to claim the request. */
	if (test_and_set_bit(info.comp_idx, pq->req_in_use)) {
		hfi1_cdbg(SDMA, "[%u:%u:%u] Entry %u is in use",
			  dd->unit, uctxt->ctxt, fd->subctxt,
			  info.comp_idx);
		return -EBADSLT;
	}
	/*
	 * All safety checks have been done and this request has been claimed.
	 */
	trace_hfi1_sdma_user_process_request(dd, uctxt->ctxt, fd->subctxt,
					     info.comp_idx);
	req = pq->reqs + info.comp_idx;
	req->data_iovs = req_iovcnt(info.ctrl) - 1; /* subtract header vector */
	req->data_len = 0;
	req->pq = pq;
	req->cq = cq;
	req->ahg_idx = -1;
	req->iov_idx = 0;
	req->sent = 0;
	req->seqnum = 0;
	req->seqcomp = 0;
	req->seqsubmitted = 0;
	req->tids = NULL;
	req->has_error = 0;
	INIT_LIST_HEAD(&req->txps);

	memcpy(&req->info, &info, sizeof(info));

	/* The request is initialized, count it */
	atomic_inc(&pq->n_reqs);

	if (req_opcode(info.ctrl) == EXPECTED) {
		/* expected must have a TID info and at least one data vector */
		if (req->data_iovs < 2) {
			SDMA_DBG(req,
				 "Not enough vectors for expected request");
			ret = -EINVAL;
			goto free_req;
		}
		req->data_iovs--;
	}

	if (!info.npkts || req->data_iovs > MAX_VECTORS_PER_REQ) {
		SDMA_DBG(req, "Too many vectors (%u/%u)", req->data_iovs,
			 MAX_VECTORS_PER_REQ);
		ret = -EINVAL;
		goto free_req;
	}

	/* Copy the header from the user buffer */
	ret = copy_from_user(&req->hdr, iovec[idx].iov_base + sizeof(info),
			     sizeof(req->hdr));
	if (ret) {
		SDMA_DBG(req, "Failed to copy header template (%d)", ret);
		ret = -EFAULT;
		goto free_req;
	}

	/* If Static rate control is not enabled, sanitize the header. */
	if (!HFI1_CAP_IS_USET(STATIC_RATE_CTRL))
		req->hdr.pbc[1] = 0;

	/* Validate the opcode. Do not trust packets from user space blindly. */
	opcode = (be32_to_cpu(req->hdr.bth[0]) >> 24) & 0xff;
	if ((opcode & USER_OPCODE_CHECK_MASK) !=
	     USER_OPCODE_CHECK_VAL) {
		SDMA_DBG(req, "Invalid opcode (%d)", opcode);
		ret = -EINVAL;
		goto free_req;
	}
	/*
	 * Validate the vl. Do not trust packets from user space blindly.
	 * VL comes from PBC, SC comes from LRH, and the VL needs to
	 * match the SC look up.
	 */
	vl = (le16_to_cpu(req->hdr.pbc[0]) >> 12) & 0xF;
	sc = (((be16_to_cpu(req->hdr.lrh[0]) >> 12) & 0xF) |
	      (((le16_to_cpu(req->hdr.pbc[1]) >> 14) & 0x1) << 4));
	if (vl >= dd->pport->vls_operational ||
	    vl != sc_to_vlt(dd, sc)) {
		SDMA_DBG(req, "Invalid SC(%u)/VL(%u)", sc, vl);
		ret = -EINVAL;
		goto free_req;
	}

	/* Checking P_KEY for requests from user-space */
	pkey = (u16)be32_to_cpu(req->hdr.bth[0]);
	slid = be16_to_cpu(req->hdr.lrh[3]);
	if (egress_pkey_check(dd->pport, slid, pkey, sc, PKEY_CHECK_INVALID)) {
		ret = -EINVAL;
		goto free_req;
	}

	/*
	 * Also should check the BTH.lnh. If it says the next header is GRH then
	 * the RXE parsing will be off and will land in the middle of the KDETH
	 * or miss it entirely.
	 */
	if ((be16_to_cpu(req->hdr.lrh[0]) & 0x3) == HFI1_LRH_GRH) {
		SDMA_DBG(req, "User tried to pass in a GRH");
		ret = -EINVAL;
		goto free_req;
	}

	req->koffset = le32_to_cpu(req->hdr.kdeth.swdata[6]);
	/*
	 * Calculate the initial TID offset based on the values of
	 * KDETH.OFFSET and KDETH.OM that are passed in.
	 */
	req->tidoffset = KDETH_GET(req->hdr.kdeth.ver_tid_offset, OFFSET) *
		(KDETH_GET(req->hdr.kdeth.ver_tid_offset, OM) ?
		 KDETH_OM_LARGE : KDETH_OM_SMALL);
	trace_hfi1_sdma_user_initial_tidoffset(dd, uctxt->ctxt, fd->subctxt,
					       info.comp_idx, req->tidoffset);
	idx++;

	/* Save all the IO vector structures */
	for (i = 0; i < req->data_iovs; i++) {
		req->iovs[i].offset = 0;
		INIT_LIST_HEAD(&req->iovs[i].list);
		memcpy(&req->iovs[i].iov,
		       iovec + idx++,
		       sizeof(req->iovs[i].iov));
		if (req->iovs[i].iov.iov_len == 0) {
			ret = -EINVAL;
			goto free_req;
		}
		req->data_len += req->iovs[i].iov.iov_len;
	}
	trace_hfi1_sdma_user_data_length(dd, uctxt->ctxt, fd->subctxt,
					 info.comp_idx, req->data_len);
	if (pcount > req->info.npkts)
		pcount = req->info.npkts;

	/*
	 * User space will provide the TID info only when the
	 * request type is EXPECTED. This is true even if there is
	 * only one packet in the request and the header is already
	 * setup. The reason for the singular TID case is that the
	 * driver needs to perform safety checks.
	 */
	if (req_opcode(req->info.ctrl) == EXPECTED) {
		u16 ntids = iovec[idx].iov_len / sizeof(*req->tids);
		u32 *tmp;

		if (!ntids || ntids > MAX_TID_PAIR_ENTRIES) {
			ret = -EINVAL;
			goto free_req;
		}

		/*
		 * We have to copy all of the tids because they may vary
		 * in size and, therefore, the TID count might not be
		 * equal to the pkt count. However, there is no way to
		 * tell at this point.
		 */
		tmp = memdup_user(iovec[idx].iov_base,
				  ntids * sizeof(*req->tids));
		if (IS_ERR(tmp)) {
			ret = PTR_ERR(tmp);
			SDMA_DBG(req, "Failed to copy %d TIDs (%d)",
				 ntids, ret);
			goto free_req;
		}
		req->tids = tmp;
		req->n_tids = ntids;
		req->tididx = 0;
		idx++;
	}

	dlid = be16_to_cpu(req->hdr.lrh[1]);
	selector = dlid_to_selector(dlid);
	selector += uctxt->ctxt + fd->subctxt;
	req->sde = sdma_select_user_engine(dd, selector, vl);

	if (!req->sde || !sdma_running(req->sde)) {
		ret = -ECOMM;
		goto free_req;
	}

	/* We don't need an AHG entry if the request contains only one packet */
	if (req->info.npkts > 1 && HFI1_CAP_IS_USET(SDMA_AHG))
		req->ahg_idx = sdma_ahg_alloc(req->sde);

	set_comp_state(pq, cq, info.comp_idx, QUEUED, 0);
	pq->state = SDMA_PKT_Q_ACTIVE;

	/*
	 * This is a somewhat blocking send implementation.
	 * The driver will block the caller until all packets of the
	 * request have been submitted to the SDMA engine. However, it
	 * will not wait for send completions.
	 */
	while (req->seqsubmitted != req->info.npkts) {
		ret = user_sdma_send_pkts(req, pcount);
		if (ret < 0) {
			int we_ret;

			if (ret != -EBUSY)
				goto free_req;
			we_ret = wait_event_interruptible_timeout(
				pq->busy.wait_dma,
				pq->state == SDMA_PKT_Q_ACTIVE,
				msecs_to_jiffies(
					SDMA_IOWAIT_TIMEOUT));
			trace_hfi1_usdma_we(pq, we_ret);
			if (we_ret <= 0)
				flush_pq_iowait(pq);
		}
	}
	*count += idx;
	return 0;
free_req:
	/*
	 * If the submitted seqsubmitted == npkts, the completion routine
	 * controls the final state. If seqsubmitted < npkts, wait for any
	 * outstanding packets to finish before cleaning up.
	 */
	if (req->seqsubmitted < req->info.npkts) {
		if (req->seqsubmitted)
			wait_event(pq->busy.wait_dma,
				   (req->seqcomp == req->seqsubmitted - 1));
		user_sdma_free_request(req);
		pq_update(pq);
		set_comp_state(pq, cq, info.comp_idx, ERROR, ret);
	}
	return ret;
}

static inline u32 compute_data_length(struct user_sdma_request *req,
				      struct user_sdma_txreq *tx)
{
	/*
	 * Determine the proper size of the packet data.
	 * The size of the data of the first packet is in the header
	 * template. However, it includes the header and ICRC, which need
	 * to be subtracted.
	 * The minimum representable packet data length in a header is 4 bytes,
	 * therefore, when the data length request is less than 4 bytes, there's
	 * only one packet, and the packet data length is equal to that of the
	 * request data length.
	 * The size of the remaining packets is the minimum of the frag
	 * size (MTU) or remaining data in the request.
	 */
	u32 len;

	if (!req->seqnum) {
		if (req->data_len < sizeof(u32))
			len = req->data_len;
		else
			len = ((be16_to_cpu(req->hdr.lrh[2]) << 2) -
			       (sizeof(tx->hdr) - 4));
	} else if (req_opcode(req->info.ctrl) == EXPECTED) {
		u32 tidlen = EXP_TID_GET(req->tids[req->tididx], LEN) *
			PAGE_SIZE;
		/*
		 * Get the data length based on the remaining space in the
		 * TID pair.
		 */
		len = min(tidlen - req->tidoffset, (u32)req->info.fragsize);
		/* If we've filled up the TID pair, move to the next one. */
		if (unlikely(!len) && ++req->tididx < req->n_tids &&
		    req->tids[req->tididx]) {
			tidlen = EXP_TID_GET(req->tids[req->tididx],
					     LEN) * PAGE_SIZE;
			req->tidoffset = 0;
			len = min_t(u32, tidlen, req->info.fragsize);
		}
		/*
		 * Since the TID pairs map entire pages, make sure that we
		 * are not going to try to send more data than we have
		 * remaining.
		 */
		len = min(len, req->data_len - req->sent);
	} else {
		len = min(req->data_len - req->sent, (u32)req->info.fragsize);
	}
	trace_hfi1_sdma_user_compute_length(req->pq->dd,
					    req->pq->ctxt,
					    req->pq->subctxt,
					    req->info.comp_idx,
					    len);
	return len;
}

static inline u32 pad_len(u32 len)
{
	if (len & (sizeof(u32) - 1))
		len += sizeof(u32) - (len & (sizeof(u32) - 1));
	return len;
}

static inline u32 get_lrh_len(struct hfi1_pkt_header hdr, u32 len)
{
	/* (Size of complete header - size of PBC) + 4B ICRC + data length */
	return ((sizeof(hdr) - sizeof(hdr.pbc)) + 4 + len);
}

static int user_sdma_txadd_ahg(struct user_sdma_request *req,
			       struct user_sdma_txreq *tx,
			       u32 datalen)
{
	int ret;
	u16 pbclen = le16_to_cpu(req->hdr.pbc[0]);
	u32 lrhlen = get_lrh_len(req->hdr, pad_len(datalen));
	struct hfi1_user_sdma_pkt_q *pq = req->pq;

	/*
	 * Copy the request header into the tx header
	 * because the HW needs a cacheline-aligned
	 * address.
	 * This copy can be optimized out if the hdr
	 * member of user_sdma_request were also
	 * cacheline aligned.
	 */
	memcpy(&tx->hdr, &req->hdr, sizeof(tx->hdr));
	if (PBC2LRH(pbclen) != lrhlen) {
		pbclen = (pbclen & 0xf000) | LRH2PBC(lrhlen);
		tx->hdr.pbc[0] = cpu_to_le16(pbclen);
	}
	ret = check_header_template(req, &tx->hdr, lrhlen, datalen);
	if (ret)
		return ret;
	ret = sdma_txinit_ahg(&tx->txreq, SDMA_TXREQ_F_AHG_COPY,
			      sizeof(tx->hdr) + datalen, req->ahg_idx,
			      0, NULL, 0, user_sdma_txreq_cb);
	if (ret)
		return ret;
	ret = sdma_txadd_kvaddr(pq->dd, &tx->txreq, &tx->hdr, sizeof(tx->hdr));
	if (ret)
		sdma_txclean(pq->dd, &tx->txreq);
	return ret;
}

static int user_sdma_send_pkts(struct user_sdma_request *req, u16 maxpkts)
{
	int ret = 0;
	u16 count;
	unsigned npkts = 0;
	struct user_sdma_txreq *tx = NULL;
	struct hfi1_user_sdma_pkt_q *pq = NULL;
	struct user_sdma_iovec *iovec = NULL;

	if (!req->pq)
		return -EINVAL;

	pq = req->pq;

	/* If tx completion has reported an error, we are done. */
	if (READ_ONCE(req->has_error))
		return -EFAULT;

	/*
	 * Check if we might have sent the entire request already
	 */
	if (unlikely(req->seqnum == req->info.npkts)) {
		if (!list_empty(&req->txps))
			goto dosend;
		return ret;
	}

	if (!maxpkts || maxpkts > req->info.npkts - req->seqnum)
		maxpkts = req->info.npkts - req->seqnum;

	while (npkts < maxpkts) {
		u32 datalen = 0;

		/*
		 * Check whether any of the completions have come back
		 * with errors. If so, we are not going to process any
		 * more packets from this request.
		 */
		if (READ_ONCE(req->has_error))
			return -EFAULT;

		tx = kmem_cache_alloc(pq->txreq_cache, GFP_KERNEL);
		if (!tx)
			return -ENOMEM;

		tx->flags = 0;
		tx->req = req;
		INIT_LIST_HEAD(&tx->list);

		/*
		 * For the last packet set the ACK request
		 * and disable header suppression.
		 */
		if (req->seqnum == req->info.npkts - 1)
			tx->flags |= (TXREQ_FLAGS_REQ_ACK |
				      TXREQ_FLAGS_REQ_DISABLE_SH);

		/*
		 * Calculate the payload size - this is min of the fragment
		 * (MTU) size or the remaining bytes in the request but only
		 * if we have payload data.
		 */
		if (req->data_len) {
			iovec = &req->iovs[req->iov_idx];
			if (READ_ONCE(iovec->offset) == iovec->iov.iov_len) {
				if (++req->iov_idx == req->data_iovs) {
					ret = -EFAULT;
					goto free_tx;
				}
				iovec = &req->iovs[req->iov_idx];
				WARN_ON(iovec->offset);
			}

			datalen = compute_data_length(req, tx);

			/*
			 * Disable header suppression for the payload <= 8DWS.
			 * If there is an uncorrectable error in the receive
			 * data FIFO when the received payload size is less than
			 * or equal to 8DWS then the RxDmaDataFifoRdUncErr is
			 * not reported; RHF.EccErr is set if the header
			 * is not suppressed.
			 */
			if (!datalen) {
				SDMA_DBG(req,
					 "Request has data but pkt len is 0");
				ret = -EFAULT;
				goto free_tx;
			} else if (datalen <= 32) {
				tx->flags |= TXREQ_FLAGS_REQ_DISABLE_SH;
			}
		}

		if (req->ahg_idx >= 0) {
			if (!req->seqnum) {
				ret = user_sdma_txadd_ahg(req, tx, datalen);
				if (ret)
					goto free_tx;
			} else {
				int changes;

				changes = set_txreq_header_ahg(req, tx,
							       datalen);
				if (changes < 0) {
					ret = changes;
					goto free_tx;
				}
			}
		} else {
			ret = sdma_txinit(&tx->txreq, 0, sizeof(req->hdr) +
					  datalen, user_sdma_txreq_cb);
			if (ret)
				goto free_tx;
			/*
			 * Modify the header for this packet. This only needs
			 * to be done if we are not going to use AHG. Otherwise,
			 * the HW will do it based on the changes we gave it
			 * during sdma_txinit_ahg().
			 */
			ret = set_txreq_header(req, tx, datalen);
			if (ret)
				goto free_txreq;
		}

		req->koffset += datalen;
		if (req_opcode(req->info.ctrl) == EXPECTED)
			req->tidoffset += datalen;
		req->sent += datalen;
		while (datalen) {
			ret = add_system_pages_to_sdma_packet(req, tx, iovec,
							      &datalen);
			if (ret)
				goto free_txreq;
			iovec = &req->iovs[req->iov_idx];
		}
		list_add_tail(&tx->txreq.list, &req->txps);
		/*
		 * It is important to increment this here as it is used to
		 * generate the BTH.PSN and, therefore, can't be bulk-updated
		 * outside of the loop.
		 */
		tx->seqnum = req->seqnum++;
		npkts++;
	}
dosend:
	ret = sdma_send_txlist(req->sde,
			       iowait_get_ib_work(&pq->busy),
			       &req->txps, &count);
	req->seqsubmitted += count;
	if (req->seqsubmitted == req->info.npkts) {
		/*
		 * The txreq has already been submitted to the HW queue
		 * so we can free the AHG entry now. Corruption will not
		 * happen due to the sequential manner in which
		 * descriptors are processed.
		 */
		if (req->ahg_idx >= 0)
			sdma_ahg_free(req->sde, req->ahg_idx);
	}
	return ret;

free_txreq:
	sdma_txclean(pq->dd, &tx->txreq);
free_tx:
	kmem_cache_free(pq->txreq_cache, tx);
	return ret;
}

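/*
 * Ask the MMU rb-tree handler to evict unused pinned-page cache entries until
 * at least @npages pages have been cleared; returns how many were cleared.
 */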
static u32 sdma_cache_evict(struct hfi1_user_sdma_pkt_q *pq, u32 npages)
{
	struct evict_data evict_data;
	struct mmu_rb_handler *handler = pq->handler;

	evict_data.cleared = 0;
	evict_data.target = npages;
	hfi1_mmu_rb_evict(handler, &evict_data);
	return evict_data.cleared;
}

static int check_header_template(struct user_sdma_request *req,
				 struct hfi1_pkt_header *hdr, u32 lrhlen,
				 u32 datalen)
{
	/*
	 * Perform safety checks for any type of packet:
	 *    - transfer size is multiple of 64 bytes
	 *    - packet length is multiple of 4 bytes
	 *    - packet length is not larger than MTU size
	 *
	 * These checks are only done for the first packet of the
	 * transfer since the header is "given" to us by user space.
	 * For the remainder of the packets we compute the values.
	 */
	if (req->info.fragsize % PIO_BLOCK_SIZE || lrhlen & 0x3 ||
	    lrhlen > get_lrh_len(*hdr, req->info.fragsize))
		return -EINVAL;

	if (req_opcode(req->info.ctrl) == EXPECTED) {
		/*
		 * The header is checked only on the first packet. Furthermore,
		 * we ensure that at least one TID entry is copied when the
		 * request is submitted. Therefore, we don't have to verify that
		 * tididx points to something sane.
		 */
		u32 tidval = req->tids[req->tididx],
			tidlen = EXP_TID_GET(tidval, LEN) * PAGE_SIZE,
			tididx = EXP_TID_GET(tidval, IDX),
			tidctrl = EXP_TID_GET(tidval, CTRL),
			tidoff;
		__le32 kval = hdr->kdeth.ver_tid_offset;

		tidoff = KDETH_GET(kval, OFFSET) *
			  (KDETH_GET(req->hdr.kdeth.ver_tid_offset, OM) ?
			   KDETH_OM_LARGE : KDETH_OM_SMALL);
		/*
		 * Expected receive packets have the following
		 * additional checks:
		 *     - offset is not larger than the TID size
		 *     - TIDCtrl values match between header and TID array
		 *     - TID indexes match between header and TID array
		 */
		if ((tidoff + datalen > tidlen) ||
		    KDETH_GET(kval, TIDCTRL) != tidctrl ||
		    KDETH_GET(kval, TID) != tididx)
			return -EINVAL;
	}
	return 0;
}

/*
 * Correctly set the BTH.PSN field based on type of
 * transfer - eager packets can just increment the PSN but
 * expected packets encode generation and sequence in the
 * BTH.PSN field so just incrementing will result in errors.
 */
static inline u32 set_pkt_bth_psn(__be32 bthpsn, u8 expct, u32 frags)
{
	u32 val = be32_to_cpu(bthpsn),
		mask = (HFI1_CAP_IS_KSET(EXTENDED_PSN) ? 0x7fffffffull :
			0xffffffull),
		psn = val & mask;
	if (expct)
		psn = (psn & ~HFI1_KDETH_BTH_SEQ_MASK) |
			((psn + frags) & HFI1_KDETH_BTH_SEQ_MASK);
	else
		psn = psn + frags;
	return psn & mask;
}

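/*
 * Illustrative example: with EXTENDED_PSN the mask is 0x7fffffff, so an eager
 * PSN of 0x00ffff00 advanced by 0x200 packets becomes 0x01000100. For an
 * expected transfer only the HFI1_KDETH_BTH_SEQ_MASK bits advance (and wrap);
 * the generation bits above them are preserved.
 */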
static int set_txreq_header(struct user_sdma_request *req,
			    struct user_sdma_txreq *tx, u32 datalen)
{
	struct hfi1_user_sdma_pkt_q *pq = req->pq;
	struct hfi1_pkt_header *hdr = &tx->hdr;
	u8 omfactor; /* KDETH.OM */
	u16 pbclen;
	int ret;
	u32 tidval = 0, lrhlen = get_lrh_len(*hdr, pad_len(datalen));

	/* Copy the header template to the request before modification */
	memcpy(hdr, &req->hdr, sizeof(*hdr));

	/*
	 * Check if the PBC and LRH length are mismatched. If so
	 * adjust both in the header.
	 */
	pbclen = le16_to_cpu(hdr->pbc[0]);
	if (PBC2LRH(pbclen) != lrhlen) {
		pbclen = (pbclen & 0xf000) | LRH2PBC(lrhlen);
		hdr->pbc[0] = cpu_to_le16(pbclen);
		hdr->lrh[2] = cpu_to_be16(lrhlen >> 2);
		/*
		 * This is the first packet in the sequence that has
		 * a "static" size that can be used for the rest of
		 * the packets (besides the last one).
		 */
		if (unlikely(req->seqnum == 2)) {
			/*
			 * From this point on the lengths in both the
			 * PBC and LRH are the same until the last
			 * packet.
			 * Adjust the template so we don't have to update
			 * every packet
			 */
			req->hdr.pbc[0] = hdr->pbc[0];
			req->hdr.lrh[2] = hdr->lrh[2];
		}
	}
	/*
	 * We only have to modify the header if this is not the
	 * first packet in the request. Otherwise, we use the
	 * header given to us.
	 */
	if (unlikely(!req->seqnum)) {
		ret = check_header_template(req, hdr, lrhlen, datalen);
		if (ret)
			return ret;
		goto done;
	}

	hdr->bth[2] = cpu_to_be32(
		set_pkt_bth_psn(hdr->bth[2],
				(req_opcode(req->info.ctrl) == EXPECTED),
				req->seqnum));

	/* Set ACK request on last packet */
	if (unlikely(tx->flags & TXREQ_FLAGS_REQ_ACK))
		hdr->bth[2] |= cpu_to_be32(1UL << 31);

	/* Set the new offset */
	hdr->kdeth.swdata[6] = cpu_to_le32(req->koffset);
	/* Expected packets have to fill in the new TID information */
	if (req_opcode(req->info.ctrl) == EXPECTED) {
		tidval = req->tids[req->tididx];
		/*
		 * If the offset puts us at the end of the current TID,
		 * advance everything.
		 */
		if ((req->tidoffset) == (EXP_TID_GET(tidval, LEN) *
					 PAGE_SIZE)) {
			req->tidoffset = 0;
			/*
			 * Since we don't copy all the TIDs, all at once,
			 * we have to check again.
			 */
			if (++req->tididx > req->n_tids - 1 ||
			    !req->tids[req->tididx]) {
				return -EINVAL;
			}
			tidval = req->tids[req->tididx];
		}
		omfactor = EXP_TID_GET(tidval, LEN) * PAGE_SIZE >=
			KDETH_OM_MAX_SIZE ? KDETH_OM_LARGE_SHIFT :
			KDETH_OM_SMALL_SHIFT;
		/* Set KDETH.TIDCtrl based on value for this TID. */
		KDETH_SET(hdr->kdeth.ver_tid_offset, TIDCTRL,
			  EXP_TID_GET(tidval, CTRL));
		/* Set KDETH.TID based on value for this TID */
		KDETH_SET(hdr->kdeth.ver_tid_offset, TID,
			  EXP_TID_GET(tidval, IDX));
		/* Clear KDETH.SH when DISABLE_SH flag is set */
		if (unlikely(tx->flags & TXREQ_FLAGS_REQ_DISABLE_SH))
			KDETH_SET(hdr->kdeth.ver_tid_offset, SH, 0);
		/*
		 * Set the KDETH.OFFSET and KDETH.OM based on size of
		 * transfer.
		 */
		trace_hfi1_sdma_user_tid_info(
			pq->dd, pq->ctxt, pq->subctxt, req->info.comp_idx,
			req->tidoffset, req->tidoffset >> omfactor,
			omfactor != KDETH_OM_SMALL_SHIFT);
		KDETH_SET(hdr->kdeth.ver_tid_offset, OFFSET,
			  req->tidoffset >> omfactor);
		KDETH_SET(hdr->kdeth.ver_tid_offset, OM,
			  omfactor != KDETH_OM_SMALL_SHIFT);
	}
done:
	trace_hfi1_sdma_user_header(pq->dd, pq->ctxt, pq->subctxt,
				    req->info.comp_idx, hdr, tidval);
	return sdma_txadd_kvaddr(pq->dd, &tx->txreq, hdr, sizeof(*hdr));
}

static int set_txreq_header_ahg(struct user_sdma_request *req,
				struct user_sdma_txreq *tx, u32 datalen)
{
	u32 ahg[AHG_KDETH_ARRAY_SIZE];
	int idx = 0;
	u8 omfactor; /* KDETH.OM */
	struct hfi1_user_sdma_pkt_q *pq = req->pq;
	struct hfi1_pkt_header *hdr = &req->hdr;
	u16 pbclen = le16_to_cpu(hdr->pbc[0]);
	u32 val32, tidval = 0, lrhlen = get_lrh_len(*hdr, pad_len(datalen));
	size_t array_size = ARRAY_SIZE(ahg);

	if (PBC2LRH(pbclen) != lrhlen) {
		/* PBC.PbcLengthDWs */
		idx = ahg_header_set(ahg, idx, array_size, 0, 0, 12,
				     (__force u16)cpu_to_le16(LRH2PBC(lrhlen)));
		if (idx < 0)
			return idx;
		/* LRH.PktLen (we need the full 16 bits due to byte swap) */
		idx = ahg_header_set(ahg, idx, array_size, 3, 0, 16,
				     (__force u16)cpu_to_be16(lrhlen >> 2));
		if (idx < 0)
			return idx;
	}
	/*
	 * Do the common updates
	 */
	/* BTH.PSN and BTH.A */
	val32 = (be32_to_cpu(hdr->bth[2]) + req->seqnum) &
		(HFI1_CAP_IS_KSET(EXTENDED_PSN) ? 0x7fffffff : 0xffffff);
	if (unlikely(tx->flags & TXREQ_FLAGS_REQ_ACK))
		val32 |= 1UL << 31;
	idx = ahg_header_set(ahg, idx, array_size, 6, 0, 16,
			     (__force u16)cpu_to_be16(val32 >> 16));
	if (idx < 0)
		return idx;
	idx = ahg_header_set(ahg, idx, array_size, 6, 16, 16,
			     (__force u16)cpu_to_be16(val32 & 0xffff));
	if (idx < 0)
		return idx;
	/* KDETH.Offset */
	idx = ahg_header_set(ahg, idx, array_size, 15, 0, 16,
			     (__force u16)cpu_to_le16(req->koffset & 0xffff));
	if (idx < 0)
		return idx;
	idx = ahg_header_set(ahg, idx, array_size, 15, 16, 16,
			     (__force u16)cpu_to_le16(req->koffset >> 16));
	if (idx < 0)
		return idx;
	if (req_opcode(req->info.ctrl) == EXPECTED) {
		__le16 val;

		tidval = req->tids[req->tididx];

		/*
		 * If the offset puts us at the end of the current TID,
		 * advance everything.
		 */
		if ((req->tidoffset) == (EXP_TID_GET(tidval, LEN) *
					 PAGE_SIZE)) {
			req->tidoffset = 0;
			/*
			 * Since we don't copy all the TIDs, all at once,
			 * we have to check again.
			 */
			if (++req->tididx > req->n_tids - 1 ||
			    !req->tids[req->tididx])
				return -EINVAL;
			tidval = req->tids[req->tididx];
		}
		omfactor = ((EXP_TID_GET(tidval, LEN) *
			     PAGE_SIZE) >=
			    KDETH_OM_MAX_SIZE) ? KDETH_OM_LARGE_SHIFT :
			    KDETH_OM_SMALL_SHIFT;
		/* KDETH.OM and KDETH.OFFSET (TID) */
		idx = ahg_header_set(
			ahg, idx, array_size, 7, 0, 16,
			((!!(omfactor - KDETH_OM_SMALL_SHIFT)) << 15 |
			 ((req->tidoffset >> omfactor)
			  & 0x7fff)));
		if (idx < 0)
			return idx;
		/* KDETH.TIDCtrl, KDETH.TID, KDETH.Intr, KDETH.SH */
		val = cpu_to_le16(((EXP_TID_GET(tidval, CTRL) & 0x3) << 10) |
				  (EXP_TID_GET(tidval, IDX) & 0x3ff));

		if (unlikely(tx->flags & TXREQ_FLAGS_REQ_DISABLE_SH)) {
			val |= cpu_to_le16((KDETH_GET(hdr->kdeth.ver_tid_offset,
						      INTR) <<
					    AHG_KDETH_INTR_SHIFT));
		} else {
			val |= KDETH_GET(hdr->kdeth.ver_tid_offset, SH) ?
			       cpu_to_le16(0x1 << AHG_KDETH_SH_SHIFT) :
			       cpu_to_le16((KDETH_GET(hdr->kdeth.ver_tid_offset,
						      INTR) <<
					    AHG_KDETH_INTR_SHIFT));
		}

		idx = ahg_header_set(ahg, idx, array_size,
				     7, 16, 14, (__force u16)val);
		if (idx < 0)
			return idx;
	}

	trace_hfi1_sdma_user_header_ahg(pq->dd, pq->ctxt, pq->subctxt,
					req->info.comp_idx, req->sde->this_idx,
					req->ahg_idx, ahg, idx, tidval);
	sdma_txinit_ahg(&tx->txreq,
			SDMA_TXREQ_F_USE_AHG,
			datalen, req->ahg_idx, idx,
			ahg, sizeof(req->hdr),
			user_sdma_txreq_cb);

	return idx;
}

/**
 * user_sdma_txreq_cb() - SDMA tx request completion callback.
 * @txreq: valid sdma tx request
 * @status: success/failure of request
 *
 * Called when the SDMA progress state machine gets notification that
 * the SDMA descriptors for this tx request have been processed by the
 * DMA engine. Called in interrupt context.
 * Only do work on completed sequences.
 */
static void user_sdma_txreq_cb(struct sdma_txreq *txreq, int status)
{
	struct user_sdma_txreq *tx =
		container_of(txreq, struct user_sdma_txreq, txreq);
	struct user_sdma_request *req;
	struct hfi1_user_sdma_pkt_q *pq;
	struct hfi1_user_sdma_comp_q *cq;
	enum hfi1_sdma_comp_state state = COMPLETE;

	if (!tx->req)
		return;

	req = tx->req;
	pq = req->pq;
	cq = req->cq;

	if (status != SDMA_TXREQ_S_OK) {
		SDMA_DBG(req, "SDMA completion with error %d",
			 status);
		WRITE_ONCE(req->has_error, 1);
		state = ERROR;
	}

	req->seqcomp = tx->seqnum;
	kmem_cache_free(pq->txreq_cache, tx);

	/* sequence isn't complete? We are done */
	if (req->seqcomp != req->info.npkts - 1)
		return;

	user_sdma_free_request(req);
	set_comp_state(pq, cq, req->info.comp_idx, state, status);
	pq_update(pq);
}

static inline void pq_update(struct hfi1_user_sdma_pkt_q *pq)
{
	if (atomic_dec_and_test(&pq->n_reqs))
		wake_up(&pq->wait);
}

static void user_sdma_free_request(struct user_sdma_request *req)
{
	if (!list_empty(&req->txps)) {
		struct sdma_txreq *t, *p;

		list_for_each_entry_safe(t, p, &req->txps, list) {
			struct user_sdma_txreq *tx =
				container_of(t, struct user_sdma_txreq, txreq);
			list_del_init(&t->list);
			sdma_txclean(req->pq->dd, t);
			kmem_cache_free(req->pq->txreq_cache, tx);
		}
	}

	kfree(req->tids);
	clear_bit(req->info.comp_idx, req->pq->req_in_use);
}

static inline void set_comp_state(struct hfi1_user_sdma_pkt_q *pq,
				  struct hfi1_user_sdma_comp_q *cq,
				  u16 idx, enum hfi1_sdma_comp_state state,
				  int ret)
{
	if (state == ERROR)
		cq->comps[idx].errcode = -ret;
	smp_wmb(); /* make sure errcode is visible first */
	cq->comps[idx].status = state;
	trace_hfi1_sdma_user_completion(pq->dd, pq->ctxt, pq->subctxt,
					idx, state, ret);
}

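/* Release a run of previously pinned user pages and free the page array. */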
static void unpin_vector_pages(struct mm_struct *mm, struct page **pages,
			       unsigned int start, unsigned int npages)
{
	hfi1_release_user_pages(mm, pages + start, npages, false);
	kfree(pages);
}

static void free_system_node(struct sdma_mmu_node *node)
{
	if (node->npages) {
		unpin_vector_pages(mm_from_sdma_node(node), node->pages, 0,
				   node->npages);
		atomic_sub(node->npages, &node->pq->n_locked);
	}
	kfree(node);
}

static inline void acquire_node(struct sdma_mmu_node *node)
{
	atomic_inc(&node->refcount);
	WARN_ON(atomic_read(&node->refcount) < 0);
}

static inline void release_node(struct mmu_rb_handler *handler,
				struct sdma_mmu_node *node)
{
	atomic_dec(&node->refcount);
	WARN_ON(atomic_read(&node->refcount) < 0);
}

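/*
 * Look up the first cached pinning that overlaps [start, end) and take a
 * reference on it under the handler lock; returns NULL if nothing overlaps.
 */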
static struct sdma_mmu_node *find_system_node(struct mmu_rb_handler *handler,
					      unsigned long start,
					      unsigned long end)
{
	struct mmu_rb_node *rb_node;
	struct sdma_mmu_node *node;
	unsigned long flags;

	spin_lock_irqsave(&handler->lock, flags);
	rb_node = hfi1_mmu_rb_get_first(handler, start, (end - start));
	if (!rb_node) {
		spin_unlock_irqrestore(&handler->lock, flags);
		return NULL;
	}
	node = container_of(rb_node, struct sdma_mmu_node, rb);
	acquire_node(node);
	spin_unlock_irqrestore(&handler->lock, flags);

	return node;
}

static int pin_system_pages(struct user_sdma_request *req,
			    uintptr_t start_address, size_t length,
			    struct sdma_mmu_node *node, int npages)
{
	struct hfi1_user_sdma_pkt_q *pq = req->pq;
	int pinned, cleared;
	struct page **pages;

	pages = kcalloc(npages, sizeof(*pages), GFP_KERNEL);
	if (!pages)
		return -ENOMEM;

retry:
	if (!hfi1_can_pin_pages(pq->dd, current->mm, atomic_read(&pq->n_locked),
				npages)) {
		SDMA_DBG(req, "Evicting: nlocked %u npages %u",
			 atomic_read(&pq->n_locked), npages);
		cleared = sdma_cache_evict(pq, npages);
		if (cleared >= npages)
			goto retry;
	}

	SDMA_DBG(req, "Acquire user pages start_address %lx node->npages %u npages %u",
		 start_address, node->npages, npages);
	pinned = hfi1_acquire_user_pages(current->mm, start_address, npages, 0,
					 pages);

	if (pinned < 0) {
		kfree(pages);
		SDMA_DBG(req, "pinned %d", pinned);
		return pinned;
	}
	if (pinned != npages) {
		unpin_vector_pages(current->mm, pages, node->npages, pinned);
		SDMA_DBG(req, "npages %u pinned %d", npages, pinned);
		return -EFAULT;
	}
	node->rb.addr = start_address;
	node->rb.len = length;
	node->pages = pages;
	node->npages = npages;
	atomic_add(pinned, &pq->n_locked);
	SDMA_DBG(req, "done. pinned %d", pinned);
	return 0;
}

static int add_system_pinning(struct user_sdma_request *req,
			      struct sdma_mmu_node **node_p,
			      unsigned long start, unsigned long len)
{
	struct hfi1_user_sdma_pkt_q *pq = req->pq;
	struct sdma_mmu_node *node;
	int ret;

	node = kzalloc(sizeof(*node), GFP_KERNEL);
	if (!node)
		return -ENOMEM;

	node->pq = pq;
	ret = pin_system_pages(req, start, len, node, PFN_DOWN(len));
	if (ret == 0) {
		ret = hfi1_mmu_rb_insert(pq->handler, &node->rb);
		if (ret)
			free_system_node(node);
		else
			*node_p = node;

		return ret;
	}

	kfree(node);
	return ret;
}

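/*
 * Find or create a pinned-page cache entry covering the start of
 * [req_start, req_start + req_len): reuse an existing node when it already
 * covers the starting address, otherwise pin and insert a new node (possibly
 * only for the uncovered head of the region). -EEXIST from a racing insert
 * simply restarts the lookup.
 */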
static int get_system_cache_entry(struct user_sdma_request *req,
				  struct sdma_mmu_node **node_p,
				  size_t req_start, size_t req_len)
{
	struct hfi1_user_sdma_pkt_q *pq = req->pq;
	u64 start = ALIGN_DOWN(req_start, PAGE_SIZE);
	u64 end = PFN_ALIGN(req_start + req_len);
	struct mmu_rb_handler *handler = pq->handler;
	int ret;

	if ((end - start) == 0) {
		SDMA_DBG(req,
			 "Request for empty cache entry req_start %lx req_len %lx start %llx end %llx",
			 req_start, req_len, start, end);
		return -EINVAL;
	}

	SDMA_DBG(req, "req_start %lx req_len %lu", req_start, req_len);

	while (1) {
		struct sdma_mmu_node *node =
			find_system_node(handler, start, end);
		u64 prepend_len = 0;

		SDMA_DBG(req, "node %p start %llx end %llu", node, start, end);
		if (!node) {
			ret = add_system_pinning(req, node_p, start,
						 end - start);
			if (ret == -EEXIST) {
				/*
				 * Another execution context has inserted a
				 * conflicting entry first.
				 */
				continue;
			}
			return ret;
		}

		if (node->rb.addr <= start) {
			/*
			 * This entry covers at least part of the region. If it doesn't extend
			 * to the end, then this will be called again for the next segment.
			 */
			*node_p = node;
			return 0;
		}

		SDMA_DBG(req, "prepend: node->rb.addr %lx, node->refcount %d",
			 node->rb.addr, atomic_read(&node->refcount));
		prepend_len = node->rb.addr - start;

		/*
		 * This node will not be returned, instead a new node
		 * will be. So release the reference.
		 */
		release_node(handler, node);

		/* Prepend a node to cover the beginning of the allocation */
		ret = add_system_pinning(req, node_p, start, prepend_len);
		if (ret == -EEXIST) {
			/* Another execution context has inserted a conflicting entry first. */
			continue;
		}
		return ret;
	}
}

static int add_mapping_to_sdma_packet(struct user_sdma_request *req,
				      struct user_sdma_txreq *tx,
				      struct sdma_mmu_node *cache_entry,
				      size_t start,
				      size_t from_this_cache_entry)
{
	struct hfi1_user_sdma_pkt_q *pq = req->pq;
	unsigned int page_offset;
	unsigned int from_this_page;
	size_t page_index;
	void *ctx;
	int ret;

	/*
	 * Because the cache may be more fragmented than the memory that is being accessed,
	 * it's not strictly necessary to have a descriptor per cache entry.
	 */
	while (from_this_cache_entry) {
		page_index = PFN_DOWN(start - cache_entry->rb.addr);

		if (page_index >= cache_entry->npages) {
			SDMA_DBG(req,
				 "Request for page_index %zu >= cache_entry->npages %u",
				 page_index, cache_entry->npages);
			return -EINVAL;
		}

		page_offset = start - ALIGN_DOWN(start, PAGE_SIZE);
		from_this_page = PAGE_SIZE - page_offset;

		if (from_this_page < from_this_cache_entry) {
			ctx = NULL;
		} else {
			/*
			 * In the case they are equal the next line has no practical effect,
			 * but it's better to do a register to register copy than a conditional
			 * branch.
			 */
			from_this_page = from_this_cache_entry;
			ctx = cache_entry;
		}

		ret = sdma_txadd_page(pq->dd, ctx, &tx->txreq,
				      cache_entry->pages[page_index],
				      page_offset, from_this_page);
		if (ret) {
			/*
			 * When there's a failure, the entire request is freed by
			 * user_sdma_send_pkts().
			 */
			SDMA_DBG(req,
				 "sdma_txadd_page failed %d page_index %lu page_offset %u from_this_page %u",
				 ret, page_index, page_offset, from_this_page);
			return -ENOMEM;
		}
		start += from_this_page;
		from_this_cache_entry -= from_this_page;
	}
	return 0;
}

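/*
 * Add from_this_iovec bytes of a user iovec to the txreq one cache entry at a
 * time, pinning pages on demand and advancing iovec->offset as data is mapped.
 */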
static int add_system_iovec_to_sdma_packet(struct user_sdma_request *req,
					   struct user_sdma_txreq *tx,
					   struct user_sdma_iovec *iovec,
					   size_t from_this_iovec)
{
	struct mmu_rb_handler *handler = req->pq->handler;

	while (from_this_iovec > 0) {
		struct sdma_mmu_node *cache_entry;
		size_t from_this_cache_entry;
		size_t start;
		int ret;

		start = (uintptr_t)iovec->iov.iov_base + iovec->offset;
		ret = get_system_cache_entry(req, &cache_entry, start,
					     from_this_iovec);
		if (ret) {
			SDMA_DBG(req, "pin system segment failed %d", ret);
			return ret;
		}

		from_this_cache_entry = cache_entry->rb.len - (start - cache_entry->rb.addr);
		if (from_this_cache_entry > from_this_iovec)
			from_this_cache_entry = from_this_iovec;

		ret = add_mapping_to_sdma_packet(req, tx, cache_entry, start,
						 from_this_cache_entry);
		if (ret) {
			/*
			 * We're guaranteed that there will be no descriptor
			 * completion callback that releases this node
			 * because only the last descriptor referencing it
			 * has a context attached, and a failure means the
			 * last descriptor was never added.
			 */
			release_node(handler, cache_entry);
			SDMA_DBG(req, "add system segment failed %d", ret);
			return ret;
		}

		iovec->offset += from_this_cache_entry;
		from_this_iovec -= from_this_cache_entry;
	}

	return 0;
}

static int add_system_pages_to_sdma_packet(struct user_sdma_request *req,
					   struct user_sdma_txreq *tx,
					   struct user_sdma_iovec *iovec,
					   u32 *pkt_data_remaining)
{
	size_t remaining_to_add = *pkt_data_remaining;
	/*
	 * Walk through iovec entries, ensure the associated pages
	 * are pinned and mapped, add data to the packet until no more
	 * data remains to be added.
	 */
	while (remaining_to_add > 0) {
		struct user_sdma_iovec *cur_iovec;
		size_t from_this_iovec;
		int ret;

		cur_iovec = iovec;
		from_this_iovec = iovec->iov.iov_len - iovec->offset;

		if (from_this_iovec > remaining_to_add) {
			from_this_iovec = remaining_to_add;
		} else {
			/* The current iovec entry will be consumed by this pass. */
			req->iov_idx++;
			iovec++;
		}

		ret = add_system_iovec_to_sdma_packet(req, tx, cur_iovec,
						      from_this_iovec);
		if (ret)
			return ret;

		remaining_to_add -= from_this_iovec;
	}
	*pkt_data_remaining = remaining_to_add;

	return 0;
}

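/*
 * Descriptor completion for system (user virtual) memory: undo the DMA
 * mapping and, if this descriptor carries a pinning context (the last
 * descriptor referencing a pinned-page cache node), drop that node's
 * reference.
 */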
void system_descriptor_complete(struct hfi1_devdata *dd,
				struct sdma_desc *descp)
{
	switch (sdma_mapping_type(descp)) {
	case SDMA_MAP_SINGLE:
		dma_unmap_single(&dd->pcidev->dev, sdma_mapping_addr(descp),
				 sdma_mapping_len(descp), DMA_TO_DEVICE);
		break;
	case SDMA_MAP_PAGE:
		dma_unmap_page(&dd->pcidev->dev, sdma_mapping_addr(descp),
			       sdma_mapping_len(descp), DMA_TO_DEVICE);
		break;
	}

	if (descp->pinning_ctx) {
		struct sdma_mmu_node *node = descp->pinning_ctx;

		release_node(node->rb.handler, node);
	}
}

static bool sdma_rb_filter(struct mmu_rb_node *node, unsigned long addr,
			   unsigned long len)
{
	return (bool)(node->addr == addr);
}

static int sdma_rb_insert(void *arg, struct mmu_rb_node *mnode)
{
	struct sdma_mmu_node *node =
		container_of(mnode, struct sdma_mmu_node, rb);

	atomic_inc(&node->refcount);
	return 0;
}

/*
 * Return 1 to remove the node from the rb tree and call the remove op.
 *
 * Called with the rb tree lock held.
 */
static int sdma_rb_evict(void *arg, struct mmu_rb_node *mnode,
			 void *evict_arg, bool *stop)
{
	struct sdma_mmu_node *node =
		container_of(mnode, struct sdma_mmu_node, rb);
	struct evict_data *evict_data = evict_arg;

	/* is this node still being used? */
	if (atomic_read(&node->refcount))
		return 0; /* keep this node */

	/* this node will be evicted, add its pages to our count */
	evict_data->cleared += node->npages;

	/* have enough pages been cleared? */
	if (evict_data->cleared >= evict_data->target)
		*stop = true;

	return 1; /* remove this node */
}

static void sdma_rb_remove(void *arg, struct mmu_rb_node *mnode)
{
	struct sdma_mmu_node *node =
		container_of(mnode, struct sdma_mmu_node, rb);

	free_system_node(node);
}

static int sdma_rb_invalidate(void *arg, struct mmu_rb_node *mnode)
{
	struct sdma_mmu_node *node =
		container_of(mnode, struct sdma_mmu_node, rb);

	if (!atomic_read(&node->refcount))
		return 1;
	return 0;
}