/*
 * Copyright(c) 2015, 2016 Intel Corporation.
 *
 * This file is provided under a dual BSD/GPLv2 license.  When using or
 * redistributing this file, you may do so under either license.
 *
 * GPL LICENSE SUMMARY
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * BSD LICENSE
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * - Redistributions of source code must retain the above copyright
 *   notice, this list of conditions and the following disclaimer.
 * - Redistributions in binary form must reproduce the above copyright
 *   notice, this list of conditions and the following disclaimer in
 *   the documentation and/or other materials provided with the
 *   distribution.
 * - Neither the name of Intel Corporation nor the names of its
 *   contributors may be used to endorse or promote products derived
 *   from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <linux/types.h>
#include <linux/list.h>
#include <asm/byteorder.h>
#include <linux/workqueue.h>
#include <linux/rculist.h>

#include "hfi.h"
#include "sdma_txreq.h"

/* Hardware limit for SDMA packet size */
#define MAX_SDMA_PKT_SIZE ((16 * 1024) - 1)

/* sdma_txreq completion status values */
#define SDMA_TXREQ_S_OK        0
#define SDMA_TXREQ_S_SENDERROR 1
#define SDMA_TXREQ_S_ABORTED   2
#define SDMA_TXREQ_S_SHUTDOWN  3

/* sdma_txreq flag bits */
#define SDMA_TXREQ_F_URGENT       0x0001
#define SDMA_TXREQ_F_AHG_COPY     0x0002
#define SDMA_TXREQ_F_USE_AHG      0x0004

/* per-descriptor mapping types */
#define SDMA_MAP_NONE          0
#define SDMA_MAP_SINGLE        1
#define SDMA_MAP_PAGE          2

/* AHG update word field encoding */
#define SDMA_AHG_VALUE_MASK          0xffff
#define SDMA_AHG_VALUE_SHIFT         0
#define SDMA_AHG_INDEX_MASK          0xf
#define SDMA_AHG_INDEX_SHIFT         16
#define SDMA_AHG_FIELD_LEN_MASK      0xf
#define SDMA_AHG_FIELD_LEN_SHIFT     20
#define SDMA_AHG_FIELD_START_MASK    0x1f
#define SDMA_AHG_FIELD_START_SHIFT   24
#define SDMA_AHG_UPDATE_ENABLE_MASK  0x1
#define SDMA_AHG_UPDATE_ENABLE_SHIFT 31
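
/*
 * Summary of the 32-bit AHG update word encoded by the masks and shifts
 * above (composed by sdma_build_ahg_descriptor() later in this file):
 *
 *   [31]    update enable
 *   [28:24] field start bit
 *   [23:20] field length in bits
 *   [19:16] header dword index
 *   [15:0]  value to write
 */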

/*
 * Be aware that the ordering and values for SDMA_AHG_APPLY_UPDATE[123]
 * are assumed when generating a skip count in submit_tx() in sdma.c.
 */
#define SDMA_AHG_NO_AHG              0
#define SDMA_AHG_COPY                1
#define SDMA_AHG_APPLY_UPDATE1       2
#define SDMA_AHG_APPLY_UPDATE2       3
#define SDMA_AHG_APPLY_UPDATE3       4

/*
 * Bits defined in the send DMA descriptor.
 */
#define SDMA_DESC0_FIRST_DESC_FLAG      BIT_ULL(63)
#define SDMA_DESC0_LAST_DESC_FLAG       BIT_ULL(62)
#define SDMA_DESC0_BYTE_COUNT_SHIFT     48
#define SDMA_DESC0_BYTE_COUNT_WIDTH     14
#define SDMA_DESC0_BYTE_COUNT_MASK \
	((1ULL << SDMA_DESC0_BYTE_COUNT_WIDTH) - 1)
#define SDMA_DESC0_BYTE_COUNT_SMASK \
	(SDMA_DESC0_BYTE_COUNT_MASK << SDMA_DESC0_BYTE_COUNT_SHIFT)
#define SDMA_DESC0_PHY_ADDR_SHIFT       0
#define SDMA_DESC0_PHY_ADDR_WIDTH       48
#define SDMA_DESC0_PHY_ADDR_MASK \
	((1ULL << SDMA_DESC0_PHY_ADDR_WIDTH) - 1)
#define SDMA_DESC0_PHY_ADDR_SMASK \
	(SDMA_DESC0_PHY_ADDR_MASK << SDMA_DESC0_PHY_ADDR_SHIFT)

#define SDMA_DESC1_HEADER_UPDATE1_SHIFT 32
#define SDMA_DESC1_HEADER_UPDATE1_WIDTH 32
#define SDMA_DESC1_HEADER_UPDATE1_MASK \
	((1ULL << SDMA_DESC1_HEADER_UPDATE1_WIDTH) - 1)
#define SDMA_DESC1_HEADER_UPDATE1_SMASK \
	(SDMA_DESC1_HEADER_UPDATE1_MASK << SDMA_DESC1_HEADER_UPDATE1_SHIFT)
#define SDMA_DESC1_HEADER_MODE_SHIFT    13
#define SDMA_DESC1_HEADER_MODE_WIDTH    3
#define SDMA_DESC1_HEADER_MODE_MASK \
	((1ULL << SDMA_DESC1_HEADER_MODE_WIDTH) - 1)
#define SDMA_DESC1_HEADER_MODE_SMASK \
	(SDMA_DESC1_HEADER_MODE_MASK << SDMA_DESC1_HEADER_MODE_SHIFT)
#define SDMA_DESC1_HEADER_INDEX_SHIFT   8
#define SDMA_DESC1_HEADER_INDEX_WIDTH   5
#define SDMA_DESC1_HEADER_INDEX_MASK \
	((1ULL << SDMA_DESC1_HEADER_INDEX_WIDTH) - 1)
#define SDMA_DESC1_HEADER_INDEX_SMASK \
	(SDMA_DESC1_HEADER_INDEX_MASK << SDMA_DESC1_HEADER_INDEX_SHIFT)
#define SDMA_DESC1_HEADER_DWS_SHIFT     4
#define SDMA_DESC1_HEADER_DWS_WIDTH     4
#define SDMA_DESC1_HEADER_DWS_MASK \
	((1ULL << SDMA_DESC1_HEADER_DWS_WIDTH) - 1)
#define SDMA_DESC1_HEADER_DWS_SMASK \
	(SDMA_DESC1_HEADER_DWS_MASK << SDMA_DESC1_HEADER_DWS_SHIFT)
#define SDMA_DESC1_GENERATION_SHIFT     2
#define SDMA_DESC1_GENERATION_WIDTH     2
#define SDMA_DESC1_GENERATION_MASK \
	((1ULL << SDMA_DESC1_GENERATION_WIDTH) - 1)
#define SDMA_DESC1_GENERATION_SMASK \
	(SDMA_DESC1_GENERATION_MASK << SDMA_DESC1_GENERATION_SHIFT)
#define SDMA_DESC1_INT_REQ_FLAG         BIT_ULL(1)
#define SDMA_DESC1_HEAD_TO_HOST_FLAG    BIT_ULL(0)
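
/*
 * Field layout implied by the definitions above (each descriptor is two
 * 64-bit qwords):
 *
 *   qw[0]: [63] first | [62] last | [61:48] byte count | [47:0] phys addr
 *   qw[1]: [63:32] header update1 | [15:13] header mode | [12:8] header index
 *          | [7:4] header dws | [3:2] generation | [1] int req
 *          | [0] head to host
 */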

enum sdma_states {
	sdma_state_s00_hw_down,
	sdma_state_s10_hw_start_up_halt_wait,
	sdma_state_s15_hw_start_up_clean_wait,
	sdma_state_s30_sw_clean_up_wait,
	sdma_state_s40_hw_clean_up_wait,
	sdma_state_s50_hw_halt_wait,
	sdma_state_s60_idle_halt_wait,
	sdma_state_s80_hw_freeze,
	sdma_state_s82_freeze_sw_clean,
	sdma_state_s99_running,
};

enum sdma_events {
	sdma_event_e00_go_hw_down,
	sdma_event_e10_go_hw_start,
	sdma_event_e15_hw_halt_done,
	sdma_event_e25_hw_clean_up_done,
	sdma_event_e30_go_running,
	sdma_event_e40_sw_cleaned,
	sdma_event_e50_hw_cleaned,
	sdma_event_e60_hw_halted,
	sdma_event_e70_go_idle,
	sdma_event_e80_hw_freeze,
	sdma_event_e81_hw_frozen,
	sdma_event_e82_hw_unfreeze,
	sdma_event_e85_link_down,
	sdma_event_e90_sw_halted,
};

struct sdma_set_state_action {
	unsigned op_enable:1;
	unsigned op_intenable:1;
	unsigned op_cleanup:1;
	unsigned go_s99_running_tofalse:1;
	unsigned go_s99_running_totrue:1;
};

struct sdma_state {
	struct completion comp;
	enum sdma_states current_state;
	unsigned go_s99_running;
	/* debugging/development */
	enum sdma_states previous_state;
	unsigned previous_op;
	enum sdma_events last_event;
};

/**
 * DOC: sdma exported routines
 *
 * These sdma routines fit into three categories:
 * - The SDMA API for building and submitting packets
 *   to the ring
 * - Initialization and tear down routines to build up
 *   and tear down SDMA
 * - ISR entrances to handle interrupts, state changes
 *   and errors
 */

/**
 * DOC: sdma PSM/verbs API
 *
 * The sdma API is designed to be used by both PSM
 * and verbs to supply packets to the SDMA ring.
 *
 * The usage of the API is as follows:
 *
 * Embed a struct iowait in the QP or PQ.  The iowait should be
 * initialized with a call to iowait_init().
 *
 * The user of the API should create an allocation method for their
 * version of the txreq.  Slabs, pre-allocated lists, and dma pools can
 * all be used.  Once the user's overload of the sdma_txreq has been
 * allocated, the embedded sdma_txreq member must be initialized with
 * sdma_txinit() or sdma_txinit_ahg().
 *
 * The embedded struct sdma_txreq must be the first member of the
 * user's txreq structure.
 *
 * The tx request, once initialized, is manipulated with calls to
 * sdma_txadd_daddr(), sdma_txadd_page(), or sdma_txadd_kvaddr()
 * for each disjoint memory location.  It is the user's responsibility
 * to understand the packet boundaries and page boundaries in order to
 * make the appropriate number of sdma_txadd_* calls.  The user
 * must be prepared to deal with failures from these routines due to
 * either memory allocation or dma_mapping failures.
 *
 * The mapping specifics for each memory location are recorded
 * in the tx.  Memory locations added with sdma_txadd_page()
 * and sdma_txadd_kvaddr() are automatically mapped when added
 * to the tx and unmapped as part of the progress processing in the
 * SDMA interrupt handling.
 *
 * sdma_txadd_daddr() is used to add a dma_addr_t memory location to
 * the tx.  An example of a use case would be a pre-allocated
 * set of headers allocated via dma_pool_alloc() or
 * dma_alloc_coherent().  For these memory locations, it
 * is the responsibility of the user to handle the unmapping
 * (usually at unload or job termination).
 *
 * The routine sdma_send_txreq() is used to submit
 * a tx to the ring after the appropriate number of
 * sdma_txadd_* calls have been done.
 *
 * If it is desired to send a burst of sdma_txreqs, sdma_send_txlist()
 * can be used to submit a list of packets.
 *
 * The user is free to use the link overhead in the struct sdma_txreq as
 * long as the tx isn't in flight.
 *
 * The extreme degenerate case of the number of descriptors
 * exceeding the ring size is automatically handled as
 * memory locations are added.  An overflow of the descriptor
 * array that is part of the sdma_txreq is also automatically
 * handled.
 */
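
/*
 * Illustrative sketch of the flow described above.  The names below
 * (my_txreq, my_complete, priv, sde, dd, hdr, page) are hypothetical and
 * only show the call sequence, not an actual caller in this driver:
 *
 *	struct my_txreq {
 *		struct sdma_txreq txreq;	// must be the first member
 *		// ... caller-private state ...
 *	};
 *
 *	struct my_txreq *tx;
 *	int ret;
 *
 *	tx = kmalloc(sizeof(*tx), GFP_ATOMIC);
 *	ret = sdma_txinit(&tx->txreq, 0, hdr_len + data_len, my_complete);
 *	if (!ret)
 *		ret = sdma_txadd_kvaddr(dd, &tx->txreq, hdr, hdr_len);
 *	if (!ret)
 *		ret = sdma_txadd_page(dd, &tx->txreq, page, offset, data_len);
 *	if (!ret)
 *		ret = sdma_send_txreq(sde, &priv->s_iowait, &tx->txreq);
 */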

/**
 * DOC: Infrastructure calls
 *
 * sdma_init() is used to initialize data structures and
 * CSRs for the desired number of SDMA engines.
 *
 * sdma_start() is used to kick the SDMA engines initialized
 * with sdma_init().  Interrupts must be enabled at this
 * point since aspects of the state machine are interrupt
 * driven.
 *
 * sdma_engine_error() and sdma_engine_interrupt() are
 * entrances for interrupts.
 *
 * sdma_map_init() is for the management of the mapping
 * table when the number of vls is changed.
 */
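
/*
 * Rough ordering implied above (a sketch, not the driver's actual probe
 * code):
 *
 *	sdma_init(dd, port);
 *	// ... request and enable SDMA interrupts ...
 *	sdma_start(dd);
 *	// later, whenever the VL configuration changes:
 *	// sdma_map_init(...);
 */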

/**
 * struct hw_sdma_desc - raw 128 bit SDMA descriptor
 *
 * This is the raw descriptor in the SDMA ring
 */
struct hw_sdma_desc {
	/* private: don't use directly */
	__le64 qw[2];
};

/**
 * struct sdma_engine - Data pertaining to each SDMA engine.
 * @dd: a back-pointer to the device data
 * @ppd: per port back-pointer
 * @imask: mask for irq manipulation
 * @idle_mask: mask for determining if an interrupt is due to sdma_idle
 *
 * This structure has the state for each sdma_engine.
 *
 * Accessing non-public fields is not supported
 * since the private members are subject to change.
 */
struct sdma_engine {
	struct hfi1_devdata *dd;
	struct hfi1_pportdata *ppd;
	void __iomem *tail_csr;
	u64 imask;			/* clear interrupt mask */

	volatile __le64 *head_dma;	/* DMA'ed by chip */
	dma_addr_t head_phys;
	struct hw_sdma_desc *descq;
	unsigned descq_full_count;
	struct sdma_txreq **tx_ring;
	dma_addr_t descq_phys;

	struct sdma_state state;

	u8 this_idx;			/* zero relative engine */
	/* protect changes to senddmactrl shadow */
	spinlock_t senddmactrl_lock;
	u64 p_senddmactrl;		/* shadow per-engine SendDmaCtrl */

	/* read/write using tail_lock */
	spinlock_t tail_lock ____cacheline_aligned_in_smp;
#ifdef CONFIG_HFI1_DEBUG_SDMA_ORDER
	/* debugging */
	u64 tail_sn;
#endif
	unsigned long ahg_bits;

	/* read/write using head_lock */
	seqlock_t head_lock ____cacheline_aligned_in_smp;
#ifdef CONFIG_HFI1_DEBUG_SDMA_ORDER
	/* debugging */
	u64 head_sn;
#endif
	u64 progress_int_cnt;

	struct list_head dmawait;

	/* CONFIG SDMA for now, just blindly duplicate */
	struct tasklet_struct sdma_hw_clean_up_task
				____cacheline_aligned_in_smp;
	struct tasklet_struct sdma_sw_clean_up_task
				____cacheline_aligned_in_smp;
	struct work_struct err_halt_worker;
	struct timer_list err_progress_check_timer;
	u32 progress_check_head;
	struct work_struct flush_worker;
	/* protect flush list */
	spinlock_t flushlist_lock;
	struct list_head flushlist;
	struct cpumask cpu_mask;
};

int sdma_init(struct hfi1_devdata *dd, u8 port);
void sdma_start(struct hfi1_devdata *dd);
void sdma_exit(struct hfi1_devdata *dd);
void sdma_all_running(struct hfi1_devdata *dd);
void sdma_all_idle(struct hfi1_devdata *dd);
void sdma_freeze_notify(struct hfi1_devdata *dd, int go_idle);
void sdma_freeze(struct hfi1_devdata *dd);
void sdma_unfreeze(struct hfi1_devdata *dd);
void sdma_wait(struct hfi1_devdata *dd);

/**
 * sdma_empty() - idle engine test
 * @sde: sdma engine
 *
 * Currently used by verbs as a latency optimization.
 *
 * Return:
 * 1 - empty, 0 - non-empty
 */
static inline int sdma_empty(struct sdma_engine *sde)
{
	return sde->descq_tail == sde->descq_head;
}

static inline u16 sdma_descq_freecnt(struct sdma_engine *sde)
{
	return sde->descq_cnt -
		(sde->descq_tail -
		 ACCESS_ONCE(sde->descq_head)) - 1;
}

static inline u16 sdma_descq_inprocess(struct sdma_engine *sde)
{
	return sde->descq_cnt - sdma_descq_freecnt(sde);
}

/*
 * Either head_lock or tail_lock is required to see
 * a consistent engine state.
 */
static inline int __sdma_running(struct sdma_engine *engine)
{
	return engine->state.current_state == sdma_state_s99_running;
}

/**
 * sdma_running() - state suitability test
 * @engine: sdma engine
 *
 * sdma_running probes the internal state to determine if it is suitable
 * for submitting packets.
 *
 * Return:
 * 1 - ok to submit, 0 - not ok to submit
 */
static inline int sdma_running(struct sdma_engine *engine)
{
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&engine->tail_lock, flags);
	ret = __sdma_running(engine);
	spin_unlock_irqrestore(&engine->tail_lock, flags);
	return ret;
}

void _sdma_txreq_ahgadd(
	struct sdma_txreq *tx,
	u8 num_ahg,
	u8 ahg_entry,
	u32 *ahg,
	u8 ahg_hlen);

/**
 * sdma_txinit_ahg() - initialize an sdma_txreq struct with AHG
 * @tx: tx request to initialize
 * @flags: flags to key last descriptor additions
 * @tlen: total packet length (pbc + headers + data)
 * @ahg_entry: ahg entry to use (0 - 31)
 * @num_ahg: number of AHG descriptors in @ahg (0 - 9)
 * @ahg: array of AHG descriptors (up to 9 entries)
 * @ahg_hlen: number of bytes from the on-chip AHG entry to use
 * @cb: callback pointer
 *
 * The allocation of the sdma_txreq and its enclosing structure is user
 * dependent.  This routine must be called to initialize the user
 * independent fields.
 *
 * The currently supported flags are SDMA_TXREQ_F_URGENT,
 * SDMA_TXREQ_F_AHG_COPY, and SDMA_TXREQ_F_USE_AHG.
 *
 * SDMA_TXREQ_F_URGENT is used for latency sensitive situations where the
 * completion is desired as soon as possible.
 *
 * SDMA_TXREQ_F_AHG_COPY causes the header in the first descriptor to be
 * copied to the chip entry.  SDMA_TXREQ_F_USE_AHG causes the code to add
 * the AHG descriptors into the first 1 to 3 descriptors.
 *
 * Completions of submitted requests can be gotten on selected
 * txreqs by giving a completion routine callback to sdma_txinit() or
 * sdma_txinit_ahg().  The environment in which the callback runs
 * can be from an ISR, a tasklet, or a thread, so no sleeping
 * kernel routines can be used.  Aspects of the sdma ring may
 * be locked so care should be taken with locking.
 *
 * The callback pointer can be NULL to avoid any callback for the packet
 * being submitted.  The callback will be provided this tx and a status.
 *
 * The status will be one of SDMA_TXREQ_S_OK, SDMA_TXREQ_S_SENDERROR,
 * SDMA_TXREQ_S_ABORTED, or SDMA_TXREQ_S_SHUTDOWN.
 *
 * If the iowait had been used, completion also indicates that the iowait
 * sdma_busy count has reached zero.
 *
 * The user data portion of tlen should be precise.  The sdma_txadd_*
 * entrances will pad with a descriptor referencing 1 - 3 bytes when the
 * number of bytes specified in tlen has been supplied to the sdma_txreq.
 *
 * ahg_hlen is used to determine the number of on-chip entry bytes to
 * use as the header.  This is for cases where the stored header is
 * larger than the header to be used in a packet.  This is typical
 * for verbs where an RDMA_WRITE_FIRST header is larger than the header
 * in an RDMA_WRITE_MIDDLE.
 */
static inline int sdma_txinit_ahg(
	struct sdma_txreq *tx,
	u16 flags,
	u16 tlen,
	u8 ahg_entry,
	u8 num_ahg,
	u32 *ahg,
	u8 ahg_hlen,
	void (*cb)(struct sdma_txreq *, int))
{
	if (tlen > MAX_SDMA_PKT_SIZE)
		return -EMSGSIZE;
	tx->desc_limit = ARRAY_SIZE(tx->descs);
	tx->descp = &tx->descs[0];
	INIT_LIST_HEAD(&tx->list);
	tx->num_desc = 0;
	tx->flags = flags;
	tx->complete = cb;
	tx->coalesce_buf = NULL;
	tx->packet_len = tlen;
	tx->tlen = tx->packet_len;
	tx->descs[0].qw[0] = SDMA_DESC0_FIRST_DESC_FLAG;
	tx->descs[0].qw[1] = 0;
	if (flags & SDMA_TXREQ_F_AHG_COPY)
		tx->descs[0].qw[1] |=
			(((u64)ahg_entry & SDMA_DESC1_HEADER_INDEX_MASK)
				<< SDMA_DESC1_HEADER_INDEX_SHIFT) |
			(((u64)SDMA_AHG_COPY & SDMA_DESC1_HEADER_MODE_MASK)
				<< SDMA_DESC1_HEADER_MODE_SHIFT);
	else if (flags & SDMA_TXREQ_F_USE_AHG && num_ahg)
		_sdma_txreq_ahgadd(tx, num_ahg, ahg_entry, ahg, ahg_hlen);
	return 0;
}
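
/*
 * Illustrative AHG usage sketch (values and names hypothetical; assumes
 * an AHG entry was reserved with sdma_ahg_alloc() and the header was
 * previously loaded with SDMA_TXREQ_F_AHG_COPY):
 *
 *	u32 ahg[1];
 *
 *	// request an update of 8 bits starting at bit 16 of header dword 4
 *	ahg[0] = sdma_build_ahg_descriptor(new_val, 4, 16, 8);
 *	ret = sdma_txinit_ahg(&tx->txreq, SDMA_TXREQ_F_USE_AHG, tlen,
 *			      ahg_entry, 1, ahg, ahg_hlen, my_complete);
 */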

/**
 * sdma_txinit() - initialize an sdma_txreq struct (no AHG)
 * @tx: tx request to initialize
 * @flags: flags to key last descriptor additions
 * @tlen: total packet length (pbc + headers + data)
 * @cb: callback pointer
 *
 * The allocation of the sdma_txreq and its enclosing structure is user
 * dependent.  This routine must be called to initialize the user
 * independent fields.
 *
 * The currently supported flag is SDMA_TXREQ_F_URGENT.
 *
 * SDMA_TXREQ_F_URGENT is used for latency sensitive situations where the
 * completion is desired as soon as possible.
 *
 * Completions of submitted requests can be gotten on selected
 * txreqs by giving a completion routine callback to sdma_txinit() or
 * sdma_txinit_ahg().  The environment in which the callback runs
 * can be from an ISR, a tasklet, or a thread, so no sleeping
 * kernel routines can be used.  Aspects of the sdma ring may
 * be locked so care should be taken with locking.
 *
 * The callback pointer can be NULL to avoid any callback for the packet
 * being submitted.
 *
 * The callback, if non-NULL, will be provided this tx and a status.  The
 * status will be one of SDMA_TXREQ_S_OK, SDMA_TXREQ_S_SENDERROR,
 * SDMA_TXREQ_S_ABORTED, or SDMA_TXREQ_S_SHUTDOWN.
 */
static inline int sdma_txinit(
	struct sdma_txreq *tx,
	u16 flags,
	u16 tlen,
	void (*cb)(struct sdma_txreq *, int))
{
	return sdma_txinit_ahg(tx, flags, tlen, 0, 0, NULL, 0, cb);
}
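
/*
 * Example completion callback shape (names hypothetical).  It may run
 * from ISR/tasklet context, so it must not sleep:
 *
 *	static void my_complete(struct sdma_txreq *txreq, int status)
 *	{
 *		struct my_txreq *tx =
 *			container_of(txreq, struct my_txreq, txreq);
 *
 *		if (status != SDMA_TXREQ_S_OK)
 *			my_count_error(tx);	// hypothetical accounting
 *		kmem_cache_free(my_cache, tx);
 *	}
 */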

/* helpers - don't use */
static inline int sdma_mapping_type(struct sdma_desc *d)
{
	return (d->qw[1] & SDMA_DESC1_GENERATION_SMASK)
		>> SDMA_DESC1_GENERATION_SHIFT;
}

static inline size_t sdma_mapping_len(struct sdma_desc *d)
{
	return (d->qw[0] & SDMA_DESC0_BYTE_COUNT_SMASK)
		>> SDMA_DESC0_BYTE_COUNT_SHIFT;
}

static inline dma_addr_t sdma_mapping_addr(struct sdma_desc *d)
{
	return (d->qw[0] & SDMA_DESC0_PHY_ADDR_SMASK)
		>> SDMA_DESC0_PHY_ADDR_SHIFT;
}

static inline void make_tx_sdma_desc(
	struct sdma_txreq *tx,
	int type,
	dma_addr_t addr,
	size_t len)
{
	struct sdma_desc *desc = &tx->descp[tx->num_desc];

	if (!tx->num_desc) {
		/* qw[0] zero; qw[1] first, ahg mode already in from init */
		desc->qw[1] |= ((u64)type & SDMA_DESC1_GENERATION_MASK)
				<< SDMA_DESC1_GENERATION_SHIFT;
	} else {
		desc->qw[0] = 0;
		desc->qw[1] = ((u64)type & SDMA_DESC1_GENERATION_MASK)
				<< SDMA_DESC1_GENERATION_SHIFT;
	}
	desc->qw[0] |= (((u64)addr & SDMA_DESC0_PHY_ADDR_MASK)
			<< SDMA_DESC0_PHY_ADDR_SHIFT) |
		       (((u64)len & SDMA_DESC0_BYTE_COUNT_MASK)
			<< SDMA_DESC0_BYTE_COUNT_SHIFT);
}

/* helper to extend txreq */
int ext_coal_sdma_tx_descs(struct hfi1_devdata *dd, struct sdma_txreq *tx,
			   int type, void *kvaddr, struct page *page,
			   unsigned long offset, u16 len);
int _pad_sdma_tx_descs(struct hfi1_devdata *dd, struct sdma_txreq *tx);
void __sdma_txclean(struct hfi1_devdata *dd, struct sdma_txreq *tx);

static inline void sdma_txclean(struct hfi1_devdata *dd, struct sdma_txreq *tx)
{
	if (tx->num_desc)
		__sdma_txclean(dd, tx);
}

/* helpers used by public routines */
static inline void _sdma_close_tx(struct hfi1_devdata *dd,
				  struct sdma_txreq *tx)
{
	tx->descp[tx->num_desc].qw[0] |=
		SDMA_DESC0_LAST_DESC_FLAG;
	tx->descp[tx->num_desc].qw[1] |=
		dd->default_desc1;
	if (tx->flags & SDMA_TXREQ_F_URGENT)
		tx->descp[tx->num_desc].qw[1] |=
			(SDMA_DESC1_HEAD_TO_HOST_FLAG |
			 SDMA_DESC1_INT_REQ_FLAG);
}

static inline int _sdma_txadd_daddr(
	struct hfi1_devdata *dd,
	int type,
	struct sdma_txreq *tx,
	dma_addr_t addr,
	u16 len)
{
	int rval = 0;

	make_tx_sdma_desc(tx, type, addr, len);
	WARN_ON(len > tx->tlen);
	tx->tlen -= len;
	/* special cases for last */
	if (!tx->tlen) {
		if (tx->packet_len & (sizeof(u32) - 1)) {
			rval = _pad_sdma_tx_descs(dd, tx);
			if (rval)
				return rval;
		} else {
			_sdma_close_tx(dd, tx);
		}
	}
	tx->num_desc++;
	return rval;
}

/**
 * sdma_txadd_page() - add a page to the sdma_txreq
 * @dd: the device to use for mapping
 * @tx: tx request to which the page is added
 * @page: page to map
 * @offset: offset within the page
 * @len: length in bytes
 *
 * This is used to add a page/offset/length descriptor.
 *
 * The mapping/unmapping of the page/offset/len is automatically handled.
 *
 * Return:
 * 0 - success, -ENOSPC - mapping fail, -ENOMEM - couldn't
 * extend/coalesce descriptor array
 */
static inline int sdma_txadd_page(
	struct hfi1_devdata *dd,
	struct sdma_txreq *tx,
	struct page *page,
	unsigned long offset,
	u16 len)
{
	dma_addr_t addr;
	int rval;

	if (unlikely(tx->num_desc == tx->desc_limit)) {
		rval = ext_coal_sdma_tx_descs(dd, tx, SDMA_MAP_PAGE,
					      NULL, page, offset, len);
		if (rval <= 0)
			return rval;
	}

	addr = dma_map_page(&dd->pcidev->dev, page, offset, len,
			    DMA_TO_DEVICE);

	if (unlikely(dma_mapping_error(&dd->pcidev->dev, addr))) {
		__sdma_txclean(dd, tx);
		return -ENOSPC;
	}

	return _sdma_txadd_daddr(
			dd, SDMA_MAP_PAGE, tx, addr, len);
}

/**
 * sdma_txadd_daddr() - add a dma address to the sdma_txreq
 * @dd: the device to use for mapping
 * @tx: sdma_txreq to which the descriptor is added
 * @addr: dma address mapped by caller
 * @len: length in bytes
 *
 * This is used to add a descriptor for memory that is already dma mapped.
 *
 * In this case, there is no unmapping as part of the progress processing
 * for this memory location.
 *
 * Return:
 * 0 - success, -ENOMEM - couldn't extend descriptor array
 */
static inline int sdma_txadd_daddr(
	struct hfi1_devdata *dd,
	struct sdma_txreq *tx,
	dma_addr_t addr,
	u16 len)
{
	int rval;

	if (unlikely(tx->num_desc == tx->desc_limit)) {
		rval = ext_coal_sdma_tx_descs(dd, tx, SDMA_MAP_NONE,
					      NULL, NULL, 0, 0);
		if (rval <= 0)
			return rval;
	}

	return _sdma_txadd_daddr(dd, SDMA_MAP_NONE, tx, addr, len);
}

/**
 * sdma_txadd_kvaddr() - add a kernel virtual address to sdma_txreq
 * @dd: the device to use for mapping
 * @tx: sdma_txreq to which the descriptor is added
 * @kvaddr: the kernel virtual address
 * @len: length in bytes
 *
 * This is used to add a descriptor referenced by the indicated kvaddr and
 * len.
 *
 * The mapping/unmapping of the kvaddr and len is automatically handled.
 *
 * Return:
 * 0 - success, -ENOSPC - mapping fail, -ENOMEM - couldn't extend/coalesce
 * descriptor array
 */
static inline int sdma_txadd_kvaddr(
	struct hfi1_devdata *dd,
	struct sdma_txreq *tx,
	void *kvaddr,
	u16 len)
{
	dma_addr_t addr;
	int rval;

	if (unlikely(tx->num_desc == tx->desc_limit)) {
		rval = ext_coal_sdma_tx_descs(dd, tx, SDMA_MAP_SINGLE,
					      kvaddr, NULL, 0, len);
		if (rval <= 0)
			return rval;
	}

	addr = dma_map_single(&dd->pcidev->dev, kvaddr, len, DMA_TO_DEVICE);

	if (unlikely(dma_mapping_error(&dd->pcidev->dev, addr))) {
		__sdma_txclean(dd, tx);
		return -ENOSPC;
	}

	return _sdma_txadd_daddr(
			dd, SDMA_MAP_SINGLE, tx, addr, len);
}

struct iowait;

int sdma_send_txreq(struct sdma_engine *sde,
		    struct iowait *wait,
		    struct sdma_txreq *tx);
int sdma_send_txlist(struct sdma_engine *sde,
		     struct iowait *wait,
		     struct list_head *tx_list);

int sdma_ahg_alloc(struct sdma_engine *sde);
void sdma_ahg_free(struct sdma_engine *sde, int ahg_index);

/**
 * sdma_build_ahg_descriptor() - build ahg descriptor
 * @data: value to write into the header field
 * @dwindex: dword index of the header field
 * @startbit: starting bit offset of the field within the dword
 * @bits: field width in bits
 *
 * Build and return a 32 bit descriptor.
 */
static inline u32 sdma_build_ahg_descriptor(
	u16 data,
	u8 dwindex,
	u8 startbit,
	u8 bits)
{
	return (u32)(1UL << SDMA_AHG_UPDATE_ENABLE_SHIFT |
		     ((startbit & SDMA_AHG_FIELD_START_MASK) <<
		      SDMA_AHG_FIELD_START_SHIFT) |
		     ((bits & SDMA_AHG_FIELD_LEN_MASK) <<
		      SDMA_AHG_FIELD_LEN_SHIFT) |
		     ((dwindex & SDMA_AHG_INDEX_MASK) <<
		      SDMA_AHG_INDEX_SHIFT) |
		     ((data & SDMA_AHG_VALUE_MASK) <<
		      SDMA_AHG_VALUE_SHIFT));
}
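
/*
 * Example (hypothetical header field; the meaning of the dword index and
 * bit offset is determined by the stored header, not by this helper):
 *
 *	// enable an update of a 12-bit field at bits [27:16] of dword 7
 *	u32 update = sdma_build_ahg_descriptor(val, 7, 16, 12);
 */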

/**
 * sdma_progress - use seq number to detect head progress
 * @sde: sdma_engine to check
 * @seq: base seq count
 * @tx: txreq for which we need to check descriptor availability
 *
 * This is used in the appropriate spot in the sleep routine
 * to check for potential ring progress.  This routine gets the
 * seqcount before queuing the iowait structure for progress.
 *
 * If the seqcount indicates that progress needs to be checked,
 * re-submission is detected by checking whether the descriptor
 * queue has enough descriptors for the txreq.
 */
static inline unsigned sdma_progress(struct sdma_engine *sde, unsigned seq,
				     struct sdma_txreq *tx)
{
	if (read_seqretry(&sde->head_lock, seq)) {
		sde->desc_avail = sdma_descq_freecnt(sde);
		if (tx->num_desc > sde->desc_avail)
			return 0;
		return 1;
	}
	return 0;
}
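
/*
 * Typical use (a sketch; the retry/sleep structure belongs to the caller
 * and the names are hypothetical):
 *
 *	seq = read_seqbegin(&sde->head_lock);
 *	// ... the ring looked full, prepare to sleep on the iowait ...
 *	if (sdma_progress(sde, seq, tx))
 *		goto retry;	// head moved and there is now room
 *	// otherwise queue the iowait and wait for engine progress
 */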

/**
 * sdma_iowait_schedule() - initialize wait structure
 * @sde: sdma_engine to schedule
 * @wait: wait struct to schedule
 *
 * This function initializes the iowait
 * structure embedded in the QP or PQ.
 */
static inline void sdma_iowait_schedule(
	struct sdma_engine *sde,
	struct iowait *wait)
{
	struct hfi1_pportdata *ppd = sde->dd->pport;

	iowait_schedule(wait, ppd->hfi1_wq, sde->cpu);
}

/* for use by interrupt handling */
void sdma_engine_error(struct sdma_engine *sde, u64 status);
void sdma_engine_interrupt(struct sdma_engine *sde, u64 status);

/*
 * The diagram below details the relationship of the mapping structures.
 *
 * Since the mapping now allows for non-uniform engines per vl, the
 * number of engines for a vl is either the vl_engines[vl] or
 * a computation based on num_sdma/num_vls:
 *
 * For example:
 * nactual = vl_engines ? vl_engines[vl] : num_sdma/num_vls
 *
 * n = roundup to next highest power of 2 using nactual
 *
 * In the case where num_sdma/num_vls doesn't divide
 * evenly, the extras are added from the last vl downward.
 *
 * For the case where n > nactual, the engines are assigned
 * in a round robin fashion wrapping back to the first engine
 * for a particular vl.
 *
 *                                      +--------------------+
 *                                      |       mask         |
 * sdma_vl_map                          |--------------------|
 * +--------------------------+       ->| sde[0] -> eng 1    |
 * |    list (RCU)            |      /  |--------------------|
 * |--------------------------|     /   | sde[1] -> eng 2    |
 * |    mask                  |    /    |--------------------|
 * |--------------------------|   /     |        *           |
 * |    actual_vls (max 8)    |  /      |--------------------|
 * |--------------------------| /       | sde[n] -> eng n    |
 * |    vls (max 8)           |/        +--------------------+
 * |--------------------------|
 * |    map[0]                |
 * |--------------------------|         +--------------------+
 * |    map[1]                |-----    |       mask         |
 * |--------------------------|     \   |--------------------|
 * |         *                |      \->| sde[0] -> eng 1+n  |
 * |         *                |         |--------------------|
 * |         *                |         | sde[1] -> eng 2+n  |
 * |--------------------------|         |--------------------|
 * |   map[vls - 1]           |-        |        *           |
 * +--------------------------+ \       |--------------------|
 *                               \      | sde[m] -> eng m+n  |
 *                                \     +--------------------+
 *                                 \
 *                                  \   +--------------------+
 *                                   \  |       mask         |
 *                                    \ |--------------------|
 *                                     >| sde[0] -> eng 1+m+n|
 *                                      |--------------------|
 *                                      | sde[1] -> eng 2+m+n|
 *                                      |--------------------|
 *                                      |        *           |
 *                                      |--------------------|
 *                                      | sde[o] -> eng o+m+n|
 *                                      +--------------------+
 */
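
/*
 * Worked example of the sizing rule above (numbers are illustrative
 * only): with num_sdma = 11 and num_vls = 4, vl 0 gets nactual = 2 and
 * vls 1-3 get nactual = 3 because the 3 extra engines are added from the
 * last vl downward.  Each per-vl sde[] array is then sized to the next
 * power of 2, so vls 1-3 use n = 4 entries with one engine listed twice
 * (round robin), while vl 0 uses n = 2.
 */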

/**
 * struct sdma_map_elem - mapping for a vl
 * @mask - selector mask
 * @sde - array of engines for this vl
 *
 * The mask is used to "mod" the selector
 * to produce index into the trailing
 * array of sdes.
 */
struct sdma_map_elem {
	u32 mask;
	struct sdma_engine *sde[0];
};

/**
 * struct sdma_vl_map - mapping from vl to engines
 * @engine_to_vl - map of an engine to a vl
 * @list - rcu head for free callback
 * @mask - vl mask to "mod" the vl to produce an index to map array
 * @actual_vls - number of vls
 * @vls - number of vls rounded to next power of 2
 * @map - array of sdma_map_elem entries
 *
 * This is the parent mapping structure.  The trailing
 * members of the struct point to sdma_map_elem entries, which
 * in turn point to an array of sde's for that vl.
 */
struct sdma_vl_map {
	s8 engine_to_vl[TXE_NUM_SDMA_ENGINES];
	struct rcu_head list;
	u32 mask;
	u8 actual_vls;
	u8 vls;
	struct sdma_map_elem *map[0];
};
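
/*
 * Sketch of the two-level lookup implied by the mask documentation above
 * (the dd->sdma_map field name and this exact code are assumptions; the
 * real selection logic lives in sdma.c):
 *
 *	struct sdma_vl_map *m;
 *	struct sdma_map_elem *e;
 *	struct sdma_engine *sde;
 *
 *	rcu_read_lock();
 *	m = rcu_dereference(dd->sdma_map);
 *	e = m->map[vl & m->mask];
 *	sde = e->sde[selector & e->mask];
 *	rcu_read_unlock();
 */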

int sdma_map_init(
	struct hfi1_devdata *dd,
	u8 port,
	u8 num_vls,
	u8 *vl_engines);

void _sdma_engine_progress_schedule(struct sdma_engine *sde);

/**
 * sdma_engine_progress_schedule() - schedule progress on engine
 * @sde: sdma_engine to schedule progress
 *
 * This is the fast path.
 */
static inline void sdma_engine_progress_schedule(
	struct sdma_engine *sde)
{
	if (!sde || sdma_descq_inprocess(sde) < (sde->descq_cnt / 8))
		return;
	_sdma_engine_progress_schedule(sde);
}

struct sdma_engine *sdma_select_engine_sc(
	struct hfi1_devdata *dd,
	u32 selector,
	u8 sc5);

struct sdma_engine *sdma_select_engine_vl(
	struct hfi1_devdata *dd,
	u32 selector,
	u8 vl);

struct sdma_engine *sdma_select_user_engine(struct hfi1_devdata *dd,
					    u32 selector, u8 vl);
ssize_t sdma_get_cpu_to_sde_map(struct sdma_engine *sde, char *buf);
ssize_t sdma_set_cpu_to_sde_map(struct sdma_engine *sde, const char *buf,
				size_t count);
int sdma_engine_get_vl(struct sdma_engine *sde);
void sdma_seqfile_dump_sde(struct seq_file *s, struct sdma_engine *sde);
void sdma_seqfile_dump_cpu_list(struct seq_file *s, struct hfi1_devdata *dd,
				unsigned long cpuid);

#ifdef CONFIG_SDMA_VERBOSITY
void sdma_dumpstate(struct sdma_engine *sde);
#endif

static inline char *slashstrip(char *s)
{
	char *r = s;

	while (*s)
		if (*s++ == '/')
			r = s;
	return r;
}

u16 sdma_get_descq_cnt(void);

extern uint mod_num_sdma;

void sdma_update_lmc(struct hfi1_devdata *dd, u64 mask, u32 lid);