// SPDX-License-Identifier: GPL-2.0

/* Copyright (c) 2012-2018, The Linux Foundation. All rights reserved.
 * Copyright (C) 2019-2021 Linaro Ltd.
 */
#include <linux/types.h>
#include <linux/device.h>
#include <linux/slab.h>
#include <linux/bitfield.h>
#include <linux/if_rmnet.h>
#include <linux/dma-direction.h>

#include "gsi.h"
#include "gsi_trans.h"
#include "ipa.h"
#include "ipa_data.h"
#include "ipa_endpoint.h"
#include "ipa_cmd.h"
#include "ipa_mem.h"
#include "ipa_modem.h"
#include "ipa_table.h"
#include "ipa_gsi.h"
#include "ipa_power.h"
#define atomic_dec_not_zero(v)	atomic_add_unless((v), -1, 0)

/* Hardware is told about receive buffers once a "batch" has been queued */
#define IPA_REPLENISH_BATCH	16		/* Must be non-zero */

/* The amount of RX buffer space consumed by standard skb overhead */
#define IPA_RX_BUFFER_OVERHEAD	(PAGE_SIZE - SKB_MAX_ORDER(NET_SKB_PAD, 0))
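/* Illustrative arithmetic (not used by the code): assuming the common
 * definition of SKB_MAX_ORDER(), the expression above reduces to
 * NET_SKB_PAD + SKB_DATA_ALIGN(sizeof(struct skb_shared_info)), i.e. the
 * skb headroom plus the shared-info footprint.  With 4096-byte pages and
 * NET_SKB_PAD of 64 that comes to a few hundred bytes per buffer.
 */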
/* Where to find the QMAP mux_id for a packet within modem-supplied metadata */
#define IPA_ENDPOINT_QMAP_METADATA_MASK		0x000000ff /* host byte order */

#define IPA_ENDPOINT_RESET_AGGR_RETRY_MAX	3
/** enum ipa_status_opcode - status element opcode hardware values */
enum ipa_status_opcode {
	IPA_STATUS_OPCODE_PACKET		= 0x01,
	IPA_STATUS_OPCODE_DROPPED_PACKET	= 0x04,
	IPA_STATUS_OPCODE_SUSPENDED_PACKET	= 0x08,
	IPA_STATUS_OPCODE_PACKET_2ND_PASS	= 0x40,
};

/** enum ipa_status_exception - status element exception type */
enum ipa_status_exception {
	/* 0 means no exception */
	IPA_STATUS_EXCEPTION_DEAGGR		= 0x01,
};
/* Status element provided by hardware */
struct ipa_status {
	u8 opcode;		/* enum ipa_status_opcode */
	u8 exception;		/* enum ipa_status_exception */
	__le16 mask;
	__le16 pkt_len;
	u8 endp_src_idx;
	u8 endp_dst_idx;
	__le32 metadata;
	__le32 flags1;
	__le64 flags2;
	__le32 flags3;
	__le32 flags4;
};
/* Field masks for struct ipa_status structure fields */
#define IPA_STATUS_MASK_TAG_VALID_FMASK		GENMASK(4, 4)
#define IPA_STATUS_SRC_IDX_FMASK		GENMASK(4, 0)
#define IPA_STATUS_DST_IDX_FMASK		GENMASK(4, 0)
#define IPA_STATUS_FLAGS1_RT_RULE_ID_FMASK	GENMASK(31, 22)
#define IPA_STATUS_FLAGS2_TAG_FMASK		GENMASK_ULL(63, 16)
static u32 aggr_byte_limit_max(enum ipa_version version)
{
	if (version < IPA_VERSION_4_5)
		return field_max(aggr_byte_limit_fmask(true));

	return field_max(aggr_byte_limit_fmask(false));
}
/* Compute the aggregation size value to use for a given buffer size */
static u32 ipa_aggr_size_kb(u32 rx_buffer_size, bool aggr_hard_limit)
{
	/* A hard aggregation limit will not be crossed; aggregation closes
	 * if saving incoming data would cross the hard byte limit boundary.
	 *
	 * With a soft limit, aggregation closes *after* the size boundary
	 * has been crossed.  In that case the limit must leave enough space
	 * after that limit to receive a full MTU of data plus overhead.
	 */
	if (!aggr_hard_limit)
		rx_buffer_size -= IPA_MTU + IPA_RX_BUFFER_OVERHEAD;

	/* The byte limit is encoded as a number of kilobytes */

	return rx_buffer_size / SZ_1K;
}
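/* Worked example (illustrative, assuming IPA_MTU is 1500 and roughly 400
 * bytes of buffer overhead): a caller passing an 8128 byte size (an 8192
 * byte buffer less NET_SKB_PAD) with a hard limit gets 8128 / SZ_1K = 7,
 * i.e. a 7 KB limit; with a soft limit the MTU and overhead are first
 * subtracted, giving (8128 - 1500 - 400) / SZ_1K = 6, i.e. a 6 KB limit.
 */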
static bool ipa_endpoint_data_valid_one(struct ipa *ipa, u32 count,
					const struct ipa_gsi_endpoint_data *all_data,
					const struct ipa_gsi_endpoint_data *data)
{
	const struct ipa_gsi_endpoint_data *other_data;
	struct device *dev = &ipa->pdev->dev;
	enum ipa_endpoint_name other_name;

	if (ipa_gsi_endpoint_data_empty(data))
		return true;

	if (!data->toward_ipa) {
		const struct ipa_endpoint_rx *rx_config;
		u32 buffer_size;
		u32 aggr_size;
		u32 limit;

		if (data->endpoint.filter_support) {
			dev_err(dev, "filtering not supported for "
					"RX endpoint %u\n",
				data->endpoint_id);
			return false;
		}

		/* Nothing more to check for non-AP RX */
		if (data->ee_id != GSI_EE_AP)
			return true;

		rx_config = &data->endpoint.config.rx;

		/* The buffer size must hold an MTU plus overhead */
		buffer_size = rx_config->buffer_size;
		limit = IPA_MTU + IPA_RX_BUFFER_OVERHEAD;
		if (buffer_size < limit) {
			dev_err(dev, "RX buffer size too small for RX endpoint %u (%u < %u)\n",
				data->endpoint_id, buffer_size, limit);
			return false;
		}

		if (!data->endpoint.config.aggregation) {
			bool result = true;

			/* No aggregation; check for bogus aggregation data */
			if (rx_config->aggr_time_limit) {
				dev_err(dev,
					"time limit with no aggregation for RX endpoint %u\n",
					data->endpoint_id);
				result = false;
			}

			if (rx_config->aggr_hard_limit) {
				dev_err(dev, "hard limit with no aggregation for RX endpoint %u\n",
					data->endpoint_id);
				result = false;
			}

			if (rx_config->aggr_close_eof) {
				dev_err(dev, "close EOF with no aggregation for RX endpoint %u\n",
					data->endpoint_id);
				result = false;
			}

			return result;	/* Nothing more to check */
		}

		/* For an endpoint supporting receive aggregation, the byte
		 * limit defines the point at which aggregation closes.  This
		 * check ensures the receive buffer size doesn't result in a
		 * limit that exceeds what's representable in the aggregation
		 * byte limit field.
		 */
		aggr_size = ipa_aggr_size_kb(buffer_size - NET_SKB_PAD,
					     rx_config->aggr_hard_limit);
		limit = aggr_byte_limit_max(ipa->version);
		if (aggr_size > limit) {
			dev_err(dev, "aggregated size too large for RX endpoint %u (%u KB > %u KB)\n",
				data->endpoint_id, aggr_size, limit);
			return false;
		}

		return true;	/* Nothing more to check for RX */
	}
	if (data->endpoint.config.status_enable) {
		other_name = data->endpoint.config.tx.status_endpoint;
		if (other_name >= count) {
			dev_err(dev, "status endpoint name %u out of range "
					"for endpoint %u\n",
				other_name, data->endpoint_id);
			return false;
		}

		/* Status endpoint must be defined... */
		other_data = &all_data[other_name];
		if (ipa_gsi_endpoint_data_empty(other_data)) {
			dev_err(dev, "DMA endpoint name %u undefined "
					"for endpoint %u\n",
				other_name, data->endpoint_id);
			return false;
		}

		/* ...and has to be an RX endpoint... */
		if (other_data->toward_ipa) {
			dev_err(dev,
				"status endpoint for endpoint %u not RX\n",
				data->endpoint_id);
			return false;
		}

		/* ...and if it's to be an AP endpoint... */
		if (other_data->ee_id == GSI_EE_AP) {
			/* ...make sure it has status enabled. */
			if (!other_data->endpoint.config.status_enable) {
				dev_err(dev,
					"status not enabled for endpoint %u\n",
					other_data->endpoint_id);
				return false;
			}
		}
	}

	if (data->endpoint.config.dma_mode) {
		other_name = data->endpoint.config.dma_endpoint;
		if (other_name >= count) {
			dev_err(dev, "DMA endpoint name %u out of range "
					"for endpoint %u\n",
				other_name, data->endpoint_id);
			return false;
		}

		other_data = &all_data[other_name];
		if (ipa_gsi_endpoint_data_empty(other_data)) {
			dev_err(dev, "DMA endpoint name %u undefined "
					"for endpoint %u\n",
				other_name, data->endpoint_id);
			return false;
		}
	}

	return true;
}
static bool ipa_endpoint_data_valid(struct ipa *ipa, u32 count,
				    const struct ipa_gsi_endpoint_data *data)
{
	const struct ipa_gsi_endpoint_data *dp = data;
	struct device *dev = &ipa->pdev->dev;
	enum ipa_endpoint_name name;

	if (count > IPA_ENDPOINT_COUNT) {
		dev_err(dev, "too many endpoints specified (%u > %u)\n",
			count, IPA_ENDPOINT_COUNT);
		return false;
	}

	/* Make sure needed endpoints have defined data */
	if (ipa_gsi_endpoint_data_empty(&data[IPA_ENDPOINT_AP_COMMAND_TX])) {
		dev_err(dev, "command TX endpoint not defined\n");
		return false;
	}
	if (ipa_gsi_endpoint_data_empty(&data[IPA_ENDPOINT_AP_LAN_RX])) {
		dev_err(dev, "LAN RX endpoint not defined\n");
		return false;
	}
	if (ipa_gsi_endpoint_data_empty(&data[IPA_ENDPOINT_AP_MODEM_TX])) {
		dev_err(dev, "AP->modem TX endpoint not defined\n");
		return false;
	}
	if (ipa_gsi_endpoint_data_empty(&data[IPA_ENDPOINT_AP_MODEM_RX])) {
		dev_err(dev, "AP<-modem RX endpoint not defined\n");
		return false;
	}

	for (name = 0; name < count; name++, dp++)
		if (!ipa_endpoint_data_valid_one(ipa, count, data, dp))
			return false;

	return true;
}
/* Allocate a transaction to use on a non-command endpoint */
static struct gsi_trans *ipa_endpoint_trans_alloc(struct ipa_endpoint *endpoint,
						  u32 tre_count)
{
	struct gsi *gsi = &endpoint->ipa->gsi;
	u32 channel_id = endpoint->channel_id;
	enum dma_data_direction direction;

	direction = endpoint->toward_ipa ? DMA_TO_DEVICE : DMA_FROM_DEVICE;

	return gsi_channel_trans_alloc(gsi, channel_id, tre_count, direction);
}
/* suspend_delay represents suspend for RX, delay for TX endpoints.
 * Note that suspend is not supported starting with IPA v4.0, and
 * delay mode should not be used starting with IPA v4.2.
 */
static bool
ipa_endpoint_init_ctrl(struct ipa_endpoint *endpoint, bool suspend_delay)
{
	u32 offset = IPA_REG_ENDP_INIT_CTRL_N_OFFSET(endpoint->endpoint_id);
	struct ipa *ipa = endpoint->ipa;
	bool state;
	u32 mask;
	u32 val;

	if (endpoint->toward_ipa)
		WARN_ON(ipa->version >= IPA_VERSION_4_2);
	else
		WARN_ON(ipa->version >= IPA_VERSION_4_0);

	mask = endpoint->toward_ipa ? ENDP_DELAY_FMASK : ENDP_SUSPEND_FMASK;

	val = ioread32(ipa->reg_virt + offset);
	state = !!(val & mask);

	/* Don't bother if it's already in the requested state */
	if (suspend_delay != state) {
		val ^= mask;
		iowrite32(val, ipa->reg_virt + offset);
	}

	return state;
}
/* We don't care what the previous state was for delay mode */
static void
ipa_endpoint_program_delay(struct ipa_endpoint *endpoint, bool enable)
{
	/* Delay mode should not be used for IPA v4.2+ */
	WARN_ON(endpoint->ipa->version >= IPA_VERSION_4_2);
	WARN_ON(!endpoint->toward_ipa);

	(void)ipa_endpoint_init_ctrl(endpoint, enable);
}
static bool ipa_endpoint_aggr_active(struct ipa_endpoint *endpoint)
{
	u32 mask = BIT(endpoint->endpoint_id);
	struct ipa *ipa = endpoint->ipa;
	u32 offset;
	u32 val;

	WARN_ON(!(mask & ipa->available));

	offset = ipa_reg_state_aggr_active_offset(ipa->version);
	val = ioread32(ipa->reg_virt + offset);

	return !!(val & mask);
}
static void ipa_endpoint_force_close(struct ipa_endpoint *endpoint)
{
	u32 mask = BIT(endpoint->endpoint_id);
	struct ipa *ipa = endpoint->ipa;

	WARN_ON(!(mask & ipa->available));

	iowrite32(mask, ipa->reg_virt + IPA_REG_AGGR_FORCE_CLOSE_OFFSET);
}
/**
 * ipa_endpoint_suspend_aggr() - Emulate suspend interrupt
 * @endpoint:	Endpoint on which to emulate a suspend
 *
 * Emulate suspend IPA interrupt to unsuspend an endpoint suspended
 * with an open aggregation frame.  This is to work around a hardware
 * issue in IPA version 3.5.1 where the suspend interrupt will not be
 * generated when it should be.
 */
static void ipa_endpoint_suspend_aggr(struct ipa_endpoint *endpoint)
{
	struct ipa *ipa = endpoint->ipa;

	if (!endpoint->config.aggregation)
		return;

	/* Nothing to do if the endpoint doesn't have aggregation open */
	if (!ipa_endpoint_aggr_active(endpoint))
		return;

	/* Force close aggregation */
	ipa_endpoint_force_close(endpoint);

	ipa_interrupt_simulate_suspend(ipa->interrupt);
}
/* Returns previous suspend state (true means suspend was enabled) */
static bool
ipa_endpoint_program_suspend(struct ipa_endpoint *endpoint, bool enable)
{
	bool suspended;

	if (endpoint->ipa->version >= IPA_VERSION_4_0)
		return enable;	/* For IPA v4.0+, no change made */

	WARN_ON(endpoint->toward_ipa);

	suspended = ipa_endpoint_init_ctrl(endpoint, enable);

	/* A client suspended with an open aggregation frame will not
	 * generate a SUSPEND IPA interrupt.  If enabling suspend, have
	 * ipa_endpoint_suspend_aggr() handle this.
	 */
	if (enable && !suspended)
		ipa_endpoint_suspend_aggr(endpoint);

	return suspended;
}
/* Put all modem RX endpoints into suspend mode, and stop transmission
 * on all modem TX endpoints.  Prior to IPA v4.2, endpoint DELAY mode is
 * used for TX endpoints; starting with IPA v4.2 we use GSI channel flow
 * control instead.
 */
void ipa_endpoint_modem_pause_all(struct ipa *ipa, bool enable)
{
	u32 endpoint_id;

	for (endpoint_id = 0; endpoint_id < IPA_ENDPOINT_MAX; endpoint_id++) {
		struct ipa_endpoint *endpoint = &ipa->endpoint[endpoint_id];

		if (endpoint->ee_id != GSI_EE_MODEM)
			continue;

		if (!endpoint->toward_ipa)
			(void)ipa_endpoint_program_suspend(endpoint, enable);
		else if (ipa->version < IPA_VERSION_4_2)
			ipa_endpoint_program_delay(endpoint, enable);
		else
			gsi_modem_channel_flow_control(&ipa->gsi,
						       endpoint->channel_id,
						       enable);
	}
}
/* Reset all modem endpoints to use the default exception endpoint */
int ipa_endpoint_modem_exception_reset_all(struct ipa *ipa)
{
	u32 initialized = ipa->initialized;
	struct gsi_trans *trans;
	u32 count;

	/* We need one command per modem TX endpoint, plus the commands
	 * that clear the pipeline.
	 */
	count = ipa->modem_tx_count + ipa_cmd_pipeline_clear_count();
	trans = ipa_cmd_trans_alloc(ipa, count);
	if (!trans) {
		dev_err(&ipa->pdev->dev,
			"no transaction to reset modem exception endpoints\n");
		return -EBUSY;
	}

	while (initialized) {
		u32 endpoint_id = __ffs(initialized);
		struct ipa_endpoint *endpoint;
		u32 offset;

		initialized ^= BIT(endpoint_id);

		/* We only reset modem TX endpoints */
		endpoint = &ipa->endpoint[endpoint_id];
		if (!(endpoint->ee_id == GSI_EE_MODEM && endpoint->toward_ipa))
			continue;

		offset = IPA_REG_ENDP_STATUS_N_OFFSET(endpoint_id);

		/* Value written is 0, and all bits are updated.  That
		 * means status is disabled on the endpoint, and as a
		 * result all other fields in the register are ignored.
		 */
		ipa_cmd_register_write_add(trans, offset, 0, ~0, false);
	}

	ipa_cmd_pipeline_clear_add(trans);

	gsi_trans_commit_wait(trans);

	ipa_cmd_pipeline_clear_wait(ipa);

	return 0;
}
static void ipa_endpoint_init_cfg(struct ipa_endpoint *endpoint)
{
	u32 offset = IPA_REG_ENDP_INIT_CFG_N_OFFSET(endpoint->endpoint_id);
	enum ipa_cs_offload_en enabled;
	u32 val = 0;

	/* FRAG_OFFLOAD_EN is 0 */
	if (endpoint->config.checksum) {
		enum ipa_version version = endpoint->ipa->version;

		if (endpoint->toward_ipa) {
			u32 checksum_offset;

			/* Checksum header offset is in 4-byte units */
			checksum_offset = sizeof(struct rmnet_map_header);
			checksum_offset /= sizeof(u32);
			val |= u32_encode_bits(checksum_offset,
					       CS_METADATA_HDR_OFFSET_FMASK);

			enabled = version < IPA_VERSION_4_5
					? IPA_CS_OFFLOAD_UL
					: IPA_CS_OFFLOAD_INLINE;
		} else {
			enabled = version < IPA_VERSION_4_5
					? IPA_CS_OFFLOAD_DL
					: IPA_CS_OFFLOAD_INLINE;
		}
	} else {
		enabled = IPA_CS_OFFLOAD_NONE;
	}
	val |= u32_encode_bits(enabled, CS_OFFLOAD_EN_FMASK);
	/* CS_GEN_QMB_MASTER_SEL is 0 */

	iowrite32(val, endpoint->ipa->reg_virt + offset);
}
static void ipa_endpoint_init_nat(struct ipa_endpoint *endpoint)
{
	u32 offset;
	u32 val;

	if (!endpoint->toward_ipa)
		return;

	offset = IPA_REG_ENDP_INIT_NAT_N_OFFSET(endpoint->endpoint_id);
	val = u32_encode_bits(IPA_NAT_BYPASS, NAT_EN_FMASK);

	iowrite32(val, endpoint->ipa->reg_virt + offset);
}
static u32
ipa_qmap_header_size(enum ipa_version version, struct ipa_endpoint *endpoint)
{
	u32 header_size = sizeof(struct rmnet_map_header);

	/* Without checksum offload, we just have the MAP header */
	if (!endpoint->config.checksum)
		return header_size;

	if (version < IPA_VERSION_4_5) {
		/* Checksum header inserted for AP TX endpoints only */
		if (endpoint->toward_ipa)
			header_size += sizeof(struct rmnet_map_ul_csum_header);
	} else {
		/* Checksum header is used in both directions */
		header_size += sizeof(struct rmnet_map_v5_csum_header);
	}

	return header_size;
}
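/* For example (sizes shown for illustration only; the sizeof() values are
 * what actually matter): struct rmnet_map_header is 4 bytes, so with
 * checksum offload an AP TX endpoint before IPA v4.5 uses 4 + 4 = 8 bytes
 * (MAP header plus UL checksum header), while on IPA v4.5+ both directions
 * use 4 + 4 = 8 bytes (MAP header plus v5 checksum header).
 */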
/**
 * ipa_endpoint_init_hdr() - Initialize HDR endpoint configuration register
 * @endpoint:	Endpoint pointer
 *
 * We program QMAP endpoints so each packet received is preceded by a QMAP
 * header structure.  The QMAP header contains a 1-byte mux_id and 2-byte
 * packet size field, and we have the IPA hardware populate both for each
 * received packet.  The header is configured (in the HDR_EXT register)
 * to use big endian format.
 *
 * The packet size is written into the QMAP header's pkt_len field.  That
 * location is defined here using the HDR_OFST_PKT_SIZE field.
 *
 * The mux_id comes from a 4-byte metadata value supplied with each packet
 * by the modem.  It is *not* a QMAP header, but it does contain the mux_id
 * value that we want, in its low-order byte.  A bitmask defined in the
 * endpoint's METADATA_MASK register defines which byte within the modem
 * metadata contains the mux_id.  And the OFST_METADATA field programmed
 * here indicates where the extracted byte should be placed within the QMAP
 * header.
 */
static void ipa_endpoint_init_hdr(struct ipa_endpoint *endpoint)
{
	u32 offset = IPA_REG_ENDP_INIT_HDR_N_OFFSET(endpoint->endpoint_id);
	struct ipa *ipa = endpoint->ipa;
	u32 val = 0;

	if (endpoint->config.qmap) {
		enum ipa_version version = ipa->version;
		size_t header_size;

		header_size = ipa_qmap_header_size(version, endpoint);
		val = ipa_header_size_encoded(version, header_size);

		/* Define how to fill fields in a received QMAP header */
		if (!endpoint->toward_ipa) {
			u32 offset;	/* Field offset within header */

			/* Where IPA will write the metadata value */
			offset = offsetof(struct rmnet_map_header, mux_id);
			val |= ipa_metadata_offset_encoded(version, offset);

			/* Where IPA will write the length */
			offset = offsetof(struct rmnet_map_header, pkt_len);
			/* Upper bits are stored in HDR_EXT with IPA v4.5 */
			if (version >= IPA_VERSION_4_5)
				offset &= field_mask(HDR_OFST_PKT_SIZE_FMASK);

			val |= HDR_OFST_PKT_SIZE_VALID_FMASK;
			val |= u32_encode_bits(offset, HDR_OFST_PKT_SIZE_FMASK);
		}
		/* For QMAP TX, metadata offset is 0 (modem assumes this) */
		val |= HDR_OFST_METADATA_VALID_FMASK;

		/* HDR_ADDITIONAL_CONST_LEN is 0; (RX only) */
		/* HDR_A5_MUX is 0 */
		/* HDR_LEN_INC_DEAGG_HDR is 0 */
		/* HDR_METADATA_REG_VALID is 0 (TX only, version < v4.5) */
	}

	iowrite32(val, ipa->reg_virt + offset);
}
static void ipa_endpoint_init_hdr_ext(struct ipa_endpoint *endpoint)
{
	u32 offset = IPA_REG_ENDP_INIT_HDR_EXT_N_OFFSET(endpoint->endpoint_id);
	u32 pad_align = endpoint->config.rx.pad_align;
	struct ipa *ipa = endpoint->ipa;
	u32 val = 0;

	if (endpoint->config.qmap) {
		/* We have a header, so we must specify its endianness */
		val |= HDR_ENDIANNESS_FMASK;	/* big endian */

		/* A QMAP header contains a 6 bit pad field at offset 0.
		 * The RMNet driver assumes this field is meaningful in
		 * packets it receives, and assumes the header's payload
		 * length includes that padding.  The RMNet driver does
		 * *not* pad packets it sends, however, so the pad field
		 * (although 0) should be ignored.
		 */
		if (!endpoint->toward_ipa) {
			val |= HDR_TOTAL_LEN_OR_PAD_VALID_FMASK;
			/* HDR_TOTAL_LEN_OR_PAD is 0 (pad, not total_len) */
			val |= HDR_PAYLOAD_LEN_INC_PADDING_FMASK;
			/* HDR_TOTAL_LEN_OR_PAD_OFFSET is 0 */
		}
	}

	/* HDR_PAYLOAD_LEN_INC_PADDING is 0 */
	if (!endpoint->toward_ipa)
		val |= u32_encode_bits(pad_align, HDR_PAD_TO_ALIGNMENT_FMASK);

	/* IPA v4.5 adds some most-significant bits to a few fields,
	 * two of which are defined in the HDR (not HDR_EXT) register.
	 */
	if (ipa->version >= IPA_VERSION_4_5) {
		/* HDR_TOTAL_LEN_OR_PAD_OFFSET is 0, so MSB is 0 */
		if (endpoint->config.qmap && !endpoint->toward_ipa) {
			u32 offset;

			offset = offsetof(struct rmnet_map_header, pkt_len);
			offset >>= hweight32(HDR_OFST_PKT_SIZE_FMASK);
			val |= u32_encode_bits(offset,
					       HDR_OFST_PKT_SIZE_MSB_FMASK);
			/* HDR_ADDITIONAL_CONST_LEN is 0 so MSB is 0 */
		}
	}
	iowrite32(val, ipa->reg_virt + offset);
}
static void ipa_endpoint_init_hdr_metadata_mask(struct ipa_endpoint *endpoint)
{
	u32 endpoint_id = endpoint->endpoint_id;
	u32 val = 0;
	u32 offset;

	if (endpoint->toward_ipa)
		return;		/* Register not valid for TX endpoints */

	offset = IPA_REG_ENDP_INIT_HDR_METADATA_MASK_N_OFFSET(endpoint_id);

	/* Note that HDR_ENDIANNESS indicates big endian header fields */
	if (endpoint->config.qmap)
		val = (__force u32)cpu_to_be32(IPA_ENDPOINT_QMAP_METADATA_MASK);

	iowrite32(val, endpoint->ipa->reg_virt + offset);
}
static void ipa_endpoint_init_mode(struct ipa_endpoint *endpoint)
{
	u32 offset = IPA_REG_ENDP_INIT_MODE_N_OFFSET(endpoint->endpoint_id);
	u32 val;

	if (!endpoint->toward_ipa)
		return;		/* Register not valid for RX endpoints */

	if (endpoint->config.dma_mode) {
		enum ipa_endpoint_name name = endpoint->config.dma_endpoint;
		u32 dma_endpoint_id;

		dma_endpoint_id = endpoint->ipa->name_map[name]->endpoint_id;

		val = u32_encode_bits(IPA_DMA, MODE_FMASK);
		val |= u32_encode_bits(dma_endpoint_id, DEST_PIPE_INDEX_FMASK);
	} else {
		val = u32_encode_bits(IPA_BASIC, MODE_FMASK);
	}
	/* All other bits unspecified (and 0) */

	iowrite32(val, endpoint->ipa->reg_virt + offset);
}
/* Encoded values for AGGR endpoint register fields */
static u32 aggr_byte_limit_encoded(enum ipa_version version, u32 limit)
{
	if (version < IPA_VERSION_4_5)
		return u32_encode_bits(limit, aggr_byte_limit_fmask(true));

	return u32_encode_bits(limit, aggr_byte_limit_fmask(false));
}
/* Encode the aggregation timer limit (microseconds) based on IPA version */
static u32 aggr_time_limit_encoded(enum ipa_version version, u32 limit)
{
	u32 gran_sel;
	u32 fmask;
	u32 val;

	if (version < IPA_VERSION_4_5) {
		/* We set aggregation granularity in ipa_hardware_config() */
		fmask = aggr_time_limit_fmask(true);
		val = DIV_ROUND_CLOSEST(limit, IPA_AGGR_GRANULARITY);
		WARN(val > field_max(fmask),
		     "aggr_time_limit too large (%u > %u usec)\n",
		     val, field_max(fmask) * IPA_AGGR_GRANULARITY);

		return u32_encode_bits(val, fmask);
	}

	/* IPA v4.5 expresses the time limit using Qtime.  The AP has
	 * pulse generators 0 and 1 available, which were configured
	 * in ipa_qtime_config() to have granularity 100 usec and
	 * 1 msec, respectively.  Use pulse generator 0 if possible,
	 * otherwise fall back to pulse generator 1.
	 */
	fmask = aggr_time_limit_fmask(false);
	val = DIV_ROUND_CLOSEST(limit, 100);
	if (val > field_max(fmask)) {
		/* Have to use pulse generator 1 (millisecond granularity) */
		gran_sel = AGGR_GRAN_SEL_FMASK;
		val = DIV_ROUND_CLOSEST(limit, 1000);
		WARN(val > field_max(fmask),
		     "aggr_time_limit too large (%u > %u usec)\n",
		     limit, field_max(fmask) * 1000);
	} else {
		/* We can use pulse generator 0 (100 usec granularity) */
		gran_sel = 0;
	}

	return gran_sel | u32_encode_bits(val, fmask);
}
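/* Encoding example (illustrative): on IPA v4.5 a 500 usec limit fits pulse
 * generator 0 and encodes as DIV_ROUND_CLOSEST(500, 100) = 5 with gran_sel
 * left at 0.  If the 100 usec units would overflow the field, the
 * millisecond generator is selected instead, so the same limit would
 * encode as DIV_ROUND_CLOSEST(limit, 1000) with AGGR_GRAN_SEL set.
 */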
static u32 aggr_sw_eof_active_encoded(enum ipa_version version, bool enabled)
{
	u32 val = enabled ? 1 : 0;

	if (version < IPA_VERSION_4_5)
		return u32_encode_bits(val, aggr_sw_eof_active_fmask(true));

	return u32_encode_bits(val, aggr_sw_eof_active_fmask(false));
}
static void ipa_endpoint_init_aggr(struct ipa_endpoint *endpoint)
{
	u32 offset = IPA_REG_ENDP_INIT_AGGR_N_OFFSET(endpoint->endpoint_id);
	enum ipa_version version = endpoint->ipa->version;
	u32 val = 0;

	if (endpoint->config.aggregation) {
		if (!endpoint->toward_ipa) {
			const struct ipa_endpoint_rx *rx_config;
			u32 buffer_size;
			bool close_eof;
			u32 limit;

			rx_config = &endpoint->config.rx;
			val |= u32_encode_bits(IPA_ENABLE_AGGR, AGGR_EN_FMASK);
			val |= u32_encode_bits(IPA_GENERIC, AGGR_TYPE_FMASK);

			buffer_size = rx_config->buffer_size;
			limit = ipa_aggr_size_kb(buffer_size - NET_SKB_PAD,
						 rx_config->aggr_hard_limit);
			val |= aggr_byte_limit_encoded(version, limit);

			limit = rx_config->aggr_time_limit;
			val |= aggr_time_limit_encoded(version, limit);

			/* AGGR_PKT_LIMIT is 0 (unlimited) */

			close_eof = rx_config->aggr_close_eof;
			val |= aggr_sw_eof_active_encoded(version, close_eof);
		} else {
			val |= u32_encode_bits(IPA_ENABLE_DEAGGR,
					       AGGR_EN_FMASK);
			val |= u32_encode_bits(IPA_QCMAP, AGGR_TYPE_FMASK);
			/* other fields ignored */
		}
		/* AGGR_FORCE_CLOSE is 0 */
		/* AGGR_GRAN_SEL is 0 for IPA v4.5 */
	} else {
		val |= u32_encode_bits(IPA_BYPASS_AGGR, AGGR_EN_FMASK);
		/* other fields ignored */
	}

	iowrite32(val, endpoint->ipa->reg_virt + offset);
}
/* Return the Qtime-based head-of-line blocking timer value that
 * represents the given number of microseconds.  The result
 * includes both the timer value and the selected timer granularity.
 */
static u32 hol_block_timer_qtime_val(struct ipa *ipa, u32 microseconds)
{
	u32 gran_sel;
	u32 val;

	/* IPA v4.5 expresses time limits using Qtime.  The AP has
	 * pulse generators 0 and 1 available, which were configured
	 * in ipa_qtime_config() to have granularity 100 usec and
	 * 1 msec, respectively.  Use pulse generator 0 if possible,
	 * otherwise fall back to pulse generator 1.
	 */
	val = DIV_ROUND_CLOSEST(microseconds, 100);
	if (val > field_max(TIME_LIMIT_FMASK)) {
		/* Have to use pulse generator 1 (millisecond granularity) */
		gran_sel = GRAN_SEL_FMASK;
		val = DIV_ROUND_CLOSEST(microseconds, 1000);
	} else {
		/* We can use pulse generator 0 (100 usec granularity) */
		gran_sel = 0;
	}

	return gran_sel | u32_encode_bits(val, TIME_LIMIT_FMASK);
}
/* The head-of-line blocking timer is defined as a tick count.  For
 * IPA version 4.5 the tick count is based on the Qtimer, which is
 * derived from the 19.2 MHz SoC XO clock.  For older IPA versions
 * each tick represents 128 cycles of the IPA core clock.
 *
 * Return the encoded value that should be written to that register
 * that represents the timeout period provided.  For IPA v4.2 this
 * encodes a base and scale value, while for earlier versions the
 * value is a simple tick count.
 */
static u32 hol_block_timer_val(struct ipa *ipa, u32 microseconds)
{
	u32 width;
	u32 scale;
	u64 ticks;
	u64 rate;
	u32 high;
	u32 val;

	if (!microseconds)
		return 0;	/* Nothing to compute if timer period is 0 */

	if (ipa->version >= IPA_VERSION_4_5)
		return hol_block_timer_qtime_val(ipa, microseconds);

	/* Use 64 bit arithmetic to avoid overflow... */
	rate = ipa_core_clock_rate(ipa);
	ticks = DIV_ROUND_CLOSEST(microseconds * rate, 128 * USEC_PER_SEC);
	/* ...but we still need to fit into a 32-bit register */
	WARN_ON(ticks > U32_MAX);

	/* IPA v3.5.1 through v4.1 just record the tick count */
	if (ipa->version < IPA_VERSION_4_2)
		return (u32)ticks;

	/* For IPA v4.2, the tick count is represented by base and
	 * scale fields within the 32-bit timer register, where:
	 *     ticks = base << scale;
	 * The best precision is achieved when the base value is as
	 * large as possible.  Find the highest set bit in the tick
	 * count, and extract the number of bits in the base field
	 * such that high bit is included.
	 */
	high = fls(ticks);		/* 1..32 */
	width = HWEIGHT32(BASE_VALUE_FMASK);
	scale = high > width ? high - width : 0;
	if (scale) {
		/* If we're scaling, round up to get a closer result */
		ticks += 1 << (scale - 1);
		/* High bit was set, so rounding might have affected it */
		if (fls(ticks) != high)
			scale++;
	}

	val = u32_encode_bits(scale, SCALE_FMASK);
	val |= u32_encode_bits(ticks >> scale, BASE_VALUE_FMASK);

	return val;
}
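/* Worked base/scale example (illustrative, assuming a hypothetical 5-bit
 * BASE_VALUE field): for ticks = 0x12345, fls() gives high = 17 and
 * width = 5, so scale = 12.  Rounding adds 1 << 11 before the base is
 * taken as ticks >> 12; had that addition carried into a new high bit,
 * scale would be bumped by one so the shifted base still fit the field.
 */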
/* If microseconds is 0, timeout is immediate */
static void ipa_endpoint_init_hol_block_timer(struct ipa_endpoint *endpoint,
					      u32 microseconds)
{
	u32 endpoint_id = endpoint->endpoint_id;
	struct ipa *ipa = endpoint->ipa;
	u32 offset;
	u32 val;

	/* This should only be changed when HOL_BLOCK_EN is disabled */
	offset = IPA_REG_ENDP_INIT_HOL_BLOCK_TIMER_N_OFFSET(endpoint_id);
	val = hol_block_timer_val(ipa, microseconds);
	iowrite32(val, ipa->reg_virt + offset);
}
static void
ipa_endpoint_init_hol_block_en(struct ipa_endpoint *endpoint, bool enable)
{
	u32 endpoint_id = endpoint->endpoint_id;
	u32 offset;
	u32 val;

	val = enable ? HOL_BLOCK_EN_FMASK : 0;
	offset = IPA_REG_ENDP_INIT_HOL_BLOCK_EN_N_OFFSET(endpoint_id);
	iowrite32(val, endpoint->ipa->reg_virt + offset);
	/* When enabling, the register must be written twice for IPA v4.5+ */
	if (enable && endpoint->ipa->version >= IPA_VERSION_4_5)
		iowrite32(val, endpoint->ipa->reg_virt + offset);
}
/* Assumes HOL_BLOCK is in disabled state */
static void ipa_endpoint_init_hol_block_enable(struct ipa_endpoint *endpoint,
					       u32 microseconds)
{
	ipa_endpoint_init_hol_block_timer(endpoint, microseconds);
	ipa_endpoint_init_hol_block_en(endpoint, true);
}

static void ipa_endpoint_init_hol_block_disable(struct ipa_endpoint *endpoint)
{
	ipa_endpoint_init_hol_block_en(endpoint, false);
}
void ipa_endpoint_modem_hol_block_clear_all(struct ipa *ipa)
{
	u32 i;

	for (i = 0; i < IPA_ENDPOINT_MAX; i++) {
		struct ipa_endpoint *endpoint = &ipa->endpoint[i];

		if (endpoint->toward_ipa || endpoint->ee_id != GSI_EE_MODEM)
			continue;

		ipa_endpoint_init_hol_block_disable(endpoint);
		ipa_endpoint_init_hol_block_enable(endpoint, 0);
	}
}
static void ipa_endpoint_init_deaggr(struct ipa_endpoint *endpoint)
{
	u32 offset = IPA_REG_ENDP_INIT_DEAGGR_N_OFFSET(endpoint->endpoint_id);
	u32 val = 0;

	if (!endpoint->toward_ipa)
		return;		/* Register not valid for RX endpoints */

	/* DEAGGR_HDR_LEN is 0 */
	/* PACKET_OFFSET_VALID is 0 */
	/* PACKET_OFFSET_LOCATION is ignored (not valid) */
	/* MAX_PACKET_LEN is 0 (not enforced) */

	iowrite32(val, endpoint->ipa->reg_virt + offset);
}
static void ipa_endpoint_init_rsrc_grp(struct ipa_endpoint *endpoint)
{
	u32 offset = IPA_REG_ENDP_INIT_RSRC_GRP_N_OFFSET(endpoint->endpoint_id);
	struct ipa *ipa = endpoint->ipa;
	u32 val;

	val = rsrc_grp_encoded(ipa->version, endpoint->config.resource_group);
	iowrite32(val, ipa->reg_virt + offset);
}
static void ipa_endpoint_init_seq(struct ipa_endpoint *endpoint)
{
	u32 offset = IPA_REG_ENDP_INIT_SEQ_N_OFFSET(endpoint->endpoint_id);
	u32 val = 0;

	if (!endpoint->toward_ipa)
		return;		/* Register not valid for RX endpoints */

	/* Low-order byte configures primary packet processing */
	val |= u32_encode_bits(endpoint->config.tx.seq_type, SEQ_TYPE_FMASK);

	/* Second byte configures replicated packet processing */
	val |= u32_encode_bits(endpoint->config.tx.seq_rep_type,
			       SEQ_REP_TYPE_FMASK);

	iowrite32(val, endpoint->ipa->reg_virt + offset);
}
/**
 * ipa_endpoint_skb_tx() - Transmit a socket buffer
 * @endpoint:	Endpoint pointer
 * @skb:	Socket buffer to send
 *
 * Returns:	0 if successful, or a negative error code
 */
int ipa_endpoint_skb_tx(struct ipa_endpoint *endpoint, struct sk_buff *skb)
{
	struct gsi_trans *trans;
	u32 nr_frags;
	int ret;

	/* Make sure source endpoint's TLV FIFO has enough entries to
	 * hold the linear portion of the skb and all its fragments.
	 * If not, see if we can linearize it before giving up.
	 */
	nr_frags = skb_shinfo(skb)->nr_frags;
	if (nr_frags > endpoint->skb_frag_max) {
		if (skb_linearize(skb))
			return -E2BIG;
		nr_frags = 0;
	}

	trans = ipa_endpoint_trans_alloc(endpoint, 1 + nr_frags);
	if (!trans)
		return -EBUSY;

	ret = gsi_trans_skb_add(trans, skb);
	if (ret)
		goto err_trans_free;
	trans->data = skb;	/* transaction owns skb now */

	gsi_trans_commit(trans, !netdev_xmit_more());

	return 0;

err_trans_free:
	gsi_trans_free(trans);

	return -ENOMEM;
}
static void ipa_endpoint_status(struct ipa_endpoint *endpoint)
{
	u32 endpoint_id = endpoint->endpoint_id;
	struct ipa *ipa = endpoint->ipa;
	u32 val = 0;
	u32 offset;

	offset = IPA_REG_ENDP_STATUS_N_OFFSET(endpoint_id);

	if (endpoint->config.status_enable) {
		val |= STATUS_EN_FMASK;
		if (endpoint->toward_ipa) {
			enum ipa_endpoint_name name;
			u32 status_endpoint_id;

			name = endpoint->config.tx.status_endpoint;
			status_endpoint_id = ipa->name_map[name]->endpoint_id;

			val |= u32_encode_bits(status_endpoint_id,
					       STATUS_ENDP_FMASK);
		}
		/* STATUS_LOCATION is 0, meaning status element precedes
		 * packet (not present for IPA v4.5)
		 */
		/* STATUS_PKT_SUPPRESS_FMASK is 0 (not present for v3.5.1) */
	}

	iowrite32(val, ipa->reg_virt + offset);
}
static int ipa_endpoint_replenish_one(struct ipa_endpoint *endpoint,
				      struct gsi_trans *trans)
{
	struct page *page;
	u32 buffer_size;
	u32 offset;
	u32 len;
	int ret;

	buffer_size = endpoint->config.rx.buffer_size;
	page = dev_alloc_pages(get_order(buffer_size));
	if (!page)
		return -ENOMEM;

	/* Offset the buffer to make space for skb headroom */
	offset = NET_SKB_PAD;
	len = buffer_size - offset;

	ret = gsi_trans_page_add(trans, page, len, offset);
	if (ret)
		put_page(page);
	else
		trans->data = page;	/* transaction owns page now */

	return ret;
}
/**
 * ipa_endpoint_replenish() - Replenish endpoint receive buffers
 * @endpoint:	Endpoint to be replenished
 *
 * The IPA hardware can hold a fixed number of receive buffers for an RX
 * endpoint, based on the number of entries in the underlying channel ring
 * buffer.  If an endpoint's "backlog" is non-zero, it indicates how many
 * more receive buffers can be supplied to the hardware.  Replenishing for
 * an endpoint can be disabled, in which case buffers are not queued to
 * the hardware.
 */
static void ipa_endpoint_replenish(struct ipa_endpoint *endpoint)
{
	struct gsi_trans *trans;

	if (!test_bit(IPA_REPLENISH_ENABLED, endpoint->replenish_flags))
		return;

	/* Skip it if it's already active */
	if (test_and_set_bit(IPA_REPLENISH_ACTIVE, endpoint->replenish_flags))
		return;

	while ((trans = ipa_endpoint_trans_alloc(endpoint, 1))) {
		bool doorbell;

		if (ipa_endpoint_replenish_one(endpoint, trans))
			goto try_again_later;

		/* Ring the doorbell if we've got a full batch */
		doorbell = !(++endpoint->replenish_count % IPA_REPLENISH_BATCH);
		gsi_trans_commit(trans, doorbell);
	}

	clear_bit(IPA_REPLENISH_ACTIVE, endpoint->replenish_flags);

	return;

try_again_later:
	gsi_trans_free(trans);
	clear_bit(IPA_REPLENISH_ACTIVE, endpoint->replenish_flags);

	/* Whenever a receive buffer transaction completes we'll try to
	 * replenish again.  It's unlikely, but if we fail to supply even
	 * one buffer, nothing will trigger another replenish attempt.
	 * If the hardware has no receive buffers queued, schedule work to
	 * try replenishing again.
	 */
	if (gsi_channel_trans_idle(&endpoint->ipa->gsi, endpoint->channel_id))
		schedule_delayed_work(&endpoint->replenish_work,
				      msecs_to_jiffies(1));
}
static void ipa_endpoint_replenish_enable(struct ipa_endpoint *endpoint)
{
	set_bit(IPA_REPLENISH_ENABLED, endpoint->replenish_flags);

	/* Start replenishing if hardware currently has no buffers */
	if (gsi_channel_trans_idle(&endpoint->ipa->gsi, endpoint->channel_id))
		ipa_endpoint_replenish(endpoint);
}

static void ipa_endpoint_replenish_disable(struct ipa_endpoint *endpoint)
{
	clear_bit(IPA_REPLENISH_ENABLED, endpoint->replenish_flags);
}

static void ipa_endpoint_replenish_work(struct work_struct *work)
{
	struct delayed_work *dwork = to_delayed_work(work);
	struct ipa_endpoint *endpoint;

	endpoint = container_of(dwork, struct ipa_endpoint, replenish_work);

	ipa_endpoint_replenish(endpoint);
}
static void ipa_endpoint_skb_copy(struct ipa_endpoint *endpoint,
				  void *data, u32 len, u32 extra)
{
	struct sk_buff *skb;

	if (!endpoint->netdev)
		return;

	skb = __dev_alloc_skb(len, GFP_ATOMIC);
	if (skb) {
		/* Copy the data into the socket buffer and receive it */
		skb_put(skb, len);
		memcpy(skb->data, data, len);
		skb->truesize += extra;
	}

	ipa_modem_skb_rx(endpoint->netdev, skb);
}
static bool ipa_endpoint_skb_build(struct ipa_endpoint *endpoint,
				   struct page *page, u32 len)
{
	u32 buffer_size = endpoint->config.rx.buffer_size;
	struct sk_buff *skb;

	/* Nothing to do if there's no netdev */
	if (!endpoint->netdev)
		return false;

	WARN_ON(len > SKB_WITH_OVERHEAD(buffer_size - NET_SKB_PAD));

	skb = build_skb(page_address(page), buffer_size);
	if (skb) {
		/* Reserve the headroom and account for the data */
		skb_reserve(skb, NET_SKB_PAD);
		skb_put(skb, len);
	}

	/* Receive the buffer (or record drop if unable to build it) */
	ipa_modem_skb_rx(endpoint->netdev, skb);

	return skb != NULL;
}
/* The format of a packet status element is the same for several status
 * types (opcodes).  Other types aren't currently supported.
 */
static bool ipa_status_format_packet(enum ipa_status_opcode opcode)
{
	switch (opcode) {
	case IPA_STATUS_OPCODE_PACKET:
	case IPA_STATUS_OPCODE_DROPPED_PACKET:
	case IPA_STATUS_OPCODE_SUSPENDED_PACKET:
	case IPA_STATUS_OPCODE_PACKET_2ND_PASS:
		return true;
	default:
		return false;
	}
}
static bool ipa_endpoint_status_skip(struct ipa_endpoint *endpoint,
				     const struct ipa_status *status)
{
	u32 endpoint_id;

	if (!ipa_status_format_packet(status->opcode))
		return true;
	if (!status->pkt_len)
		return true;
	endpoint_id = u8_get_bits(status->endp_dst_idx,
				  IPA_STATUS_DST_IDX_FMASK);
	if (endpoint_id != endpoint->endpoint_id)
		return true;

	return false;	/* Don't skip this packet, process it */
}
static bool ipa_endpoint_status_tag(struct ipa_endpoint *endpoint,
				    const struct ipa_status *status)
{
	struct ipa_endpoint *command_endpoint;
	struct ipa *ipa = endpoint->ipa;
	u32 endpoint_id;

	if (!le16_get_bits(status->mask, IPA_STATUS_MASK_TAG_VALID_FMASK))
		return false;	/* No valid tag */

	/* The status contains a valid tag.  We know the packet was sent to
	 * this endpoint (already verified by ipa_endpoint_status_skip()).
	 * If the packet came from the AP->command TX endpoint we know
	 * this packet was sent as part of the pipeline clear process.
	 */
	endpoint_id = u8_get_bits(status->endp_src_idx,
				  IPA_STATUS_SRC_IDX_FMASK);
	command_endpoint = ipa->name_map[IPA_ENDPOINT_AP_COMMAND_TX];
	if (endpoint_id == command_endpoint->endpoint_id) {
		complete(&ipa->completion);
	} else {
		dev_err(&ipa->pdev->dev,
			"unexpected tagged packet from endpoint %u\n",
			endpoint_id);
	}

	return true;
}
/* Return whether the status indicates the packet should be dropped */
static bool ipa_endpoint_status_drop(struct ipa_endpoint *endpoint,
				     const struct ipa_status *status)
{
	u32 val;

	/* If the status indicates a tagged transfer, we'll drop the packet */
	if (ipa_endpoint_status_tag(endpoint, status))
		return true;

	/* Deaggregation exceptions we drop; all other types we consume */
	if (status->exception)
		return status->exception == IPA_STATUS_EXCEPTION_DEAGGR;

	/* Drop the packet if it fails to match a routing rule; otherwise no */
	val = le32_get_bits(status->flags1, IPA_STATUS_FLAGS1_RT_RULE_ID_FMASK);

	return val == field_max(IPA_STATUS_FLAGS1_RT_RULE_ID_FMASK);
}
static void ipa_endpoint_status_parse(struct ipa_endpoint *endpoint,
				      struct page *page, u32 total_len)
{
	u32 buffer_size = endpoint->config.rx.buffer_size;
	void *data = page_address(page) + NET_SKB_PAD;
	u32 unused = buffer_size - total_len;
	u32 resid = total_len;

	while (resid) {
		const struct ipa_status *status = data;
		u32 align;
		u32 len;

		if (resid < sizeof(*status)) {
			dev_err(&endpoint->ipa->pdev->dev,
				"short message (%u bytes < %zu byte status)\n",
				resid, sizeof(*status));
			break;
		}

		/* Skip over status packets that lack packet data */
		if (ipa_endpoint_status_skip(endpoint, status)) {
			data += sizeof(*status);
			resid -= sizeof(*status);
			continue;
		}

		/* Compute the amount of buffer space consumed by the packet,
		 * including the status element.  If the hardware is configured
		 * to pad packet data to an aligned boundary, account for that.
		 * And if checksum offload is enabled a trailer containing
		 * computed checksum information will be appended.
		 */
		align = endpoint->config.rx.pad_align ? : 1;
		len = le16_to_cpu(status->pkt_len);
		len = sizeof(*status) + ALIGN(len, align);
		if (endpoint->config.checksum)
			len += sizeof(struct rmnet_map_dl_csum_trailer);

		if (!ipa_endpoint_status_drop(endpoint, status)) {
			void *data2;
			u32 extra;
			u32 len2;

			/* Client receives only packet data (no status) */
			data2 = data + sizeof(*status);
			len2 = le16_to_cpu(status->pkt_len);

			/* Have the true size reflect the extra unused space in
			 * the original receive buffer.  Distribute the "cost"
			 * proportionately across all aggregated packets in the
			 * buffer.
			 */
			extra = DIV_ROUND_CLOSEST(unused * len, total_len);
			ipa_endpoint_skb_copy(endpoint, data2, len2, extra);
		}

		/* Consume status and the full packet it describes */
		data += len;
		resid -= len;
	}
}
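/* Accounting example (illustrative): with an 8192 byte buffer holding
 * total_len = 3000 bytes of aggregated data, unused is 5192.  A packet
 * whose status plus (padded) payload consumes len = 1000 bytes is charged
 * extra = DIV_ROUND_CLOSEST(5192 * 1000, 3000) = 1731 truesize bytes, so
 * the unused tail of the buffer is spread across the copied packets in
 * proportion to the space each consumed.
 */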
void ipa_endpoint_trans_complete(struct ipa_endpoint *endpoint,
				 struct gsi_trans *trans)
{
	struct page *page;

	if (endpoint->toward_ipa)
		return;

	if (trans->cancelled)
		goto done;

	/* Parse or build a socket buffer using the actual received length */
	page = trans->data;
	if (endpoint->config.status_enable)
		ipa_endpoint_status_parse(endpoint, page, trans->len);
	else if (ipa_endpoint_skb_build(endpoint, page, trans->len))
		trans->data = NULL;	/* Pages have been consumed */
done:
	ipa_endpoint_replenish(endpoint);
}
void ipa_endpoint_trans_release(struct ipa_endpoint *endpoint,
				struct gsi_trans *trans)
{
	if (endpoint->toward_ipa) {
		struct ipa *ipa = endpoint->ipa;

		/* Nothing to do for command transactions */
		if (endpoint != ipa->name_map[IPA_ENDPOINT_AP_COMMAND_TX]) {
			struct sk_buff *skb = trans->data;

			if (skb)
				dev_kfree_skb_any(skb);
		}
	} else {
		struct page *page = trans->data;

		if (page)
			put_page(page);
	}
}
void ipa_endpoint_default_route_set(struct ipa *ipa, u32 endpoint_id)
{
	u32 val;

	/* ROUTE_DIS is 0 */
	val = u32_encode_bits(endpoint_id, ROUTE_DEF_PIPE_FMASK);
	val |= ROUTE_DEF_HDR_TABLE_FMASK;
	val |= u32_encode_bits(0, ROUTE_DEF_HDR_OFST_FMASK);
	val |= u32_encode_bits(endpoint_id, ROUTE_FRAG_DEF_PIPE_FMASK);
	val |= ROUTE_DEF_RETAIN_HDR_FMASK;

	iowrite32(val, ipa->reg_virt + IPA_REG_ROUTE_OFFSET);
}

void ipa_endpoint_default_route_clear(struct ipa *ipa)
{
	ipa_endpoint_default_route_set(ipa, 0);
}
/**
 * ipa_endpoint_reset_rx_aggr() - Reset RX endpoint with aggregation active
 * @endpoint:	Endpoint to be reset
 *
 * If aggregation is active on an RX endpoint when a reset is performed
 * on its underlying GSI channel, a special sequence of actions must be
 * taken to ensure the IPA pipeline is properly cleared.
 *
 * Return:	0 if successful, or a negative error code
 */
static int ipa_endpoint_reset_rx_aggr(struct ipa_endpoint *endpoint)
{
	struct device *dev = &endpoint->ipa->pdev->dev;
	struct ipa *ipa = endpoint->ipa;
	struct gsi *gsi = &ipa->gsi;
	bool suspended = false;
	dma_addr_t addr;
	u32 retries;
	u32 len = 1;
	void *virt;
	int ret;

	virt = kzalloc(len, GFP_KERNEL);
	if (!virt)
		return -ENOMEM;

	addr = dma_map_single(dev, virt, len, DMA_FROM_DEVICE);
	if (dma_mapping_error(dev, addr)) {
		ret = -ENOMEM;
		goto out_kfree;
	}

	/* Force close aggregation before issuing the reset */
	ipa_endpoint_force_close(endpoint);

	/* Reset and reconfigure the channel with the doorbell engine
	 * disabled.  Then poll until we know aggregation is no longer
	 * active.  We'll re-enable the doorbell (if appropriate) when
	 * we reset again below.
	 */
	gsi_channel_reset(gsi, endpoint->channel_id, false);

	/* Make sure the channel isn't suspended */
	suspended = ipa_endpoint_program_suspend(endpoint, false);

	/* Start channel and do a 1 byte read */
	ret = gsi_channel_start(gsi, endpoint->channel_id);
	if (ret)
		goto out_suspend_again;

	ret = gsi_trans_read_byte(gsi, endpoint->channel_id, addr);
	if (ret)
		goto err_endpoint_stop;

	/* Wait for aggregation to be closed on the channel */
	retries = IPA_ENDPOINT_RESET_AGGR_RETRY_MAX;
	do {
		if (!ipa_endpoint_aggr_active(endpoint))
			break;
		usleep_range(USEC_PER_MSEC, 2 * USEC_PER_MSEC);
	} while (retries--);

	/* Check one last time */
	if (ipa_endpoint_aggr_active(endpoint))
		dev_err(dev, "endpoint %u still active during reset\n",
			endpoint->endpoint_id);

	gsi_trans_read_byte_done(gsi, endpoint->channel_id);

	ret = gsi_channel_stop(gsi, endpoint->channel_id);
	if (ret)
		goto out_suspend_again;

	/* Finally, reset and reconfigure the channel again (re-enabling
	 * the doorbell engine if appropriate).  Sleep for 1 millisecond to
	 * complete the channel reset sequence.  Finish by suspending the
	 * channel again (if necessary).
	 */
	gsi_channel_reset(gsi, endpoint->channel_id, true);

	usleep_range(USEC_PER_MSEC, 2 * USEC_PER_MSEC);

	goto out_suspend_again;

err_endpoint_stop:
	(void)gsi_channel_stop(gsi, endpoint->channel_id);
out_suspend_again:
	if (suspended)
		(void)ipa_endpoint_program_suspend(endpoint, true);
	dma_unmap_single(dev, addr, len, DMA_FROM_DEVICE);
out_kfree:
	kfree(virt);

	return ret;
}
static void ipa_endpoint_reset(struct ipa_endpoint *endpoint)
{
	u32 channel_id = endpoint->channel_id;
	struct ipa *ipa = endpoint->ipa;
	bool special;
	int ret = 0;

	/* On IPA v3.5.1, if an RX endpoint is reset while aggregation
	 * is active, we need to handle things specially to recover.
	 * All other cases just need to reset the underlying GSI channel.
	 */
	special = ipa->version < IPA_VERSION_4_0 && !endpoint->toward_ipa &&
			endpoint->config.aggregation;
	if (special && ipa_endpoint_aggr_active(endpoint))
		ret = ipa_endpoint_reset_rx_aggr(endpoint);
	else
		gsi_channel_reset(&ipa->gsi, channel_id, true);

	if (ret)
		dev_err(&ipa->pdev->dev,
			"error %d resetting channel %u for endpoint %u\n",
			ret, endpoint->channel_id, endpoint->endpoint_id);
}
static void ipa_endpoint_program(struct ipa_endpoint *endpoint)
{
	if (endpoint->toward_ipa) {
		/* Newer versions of IPA use GSI channel flow control
		 * instead of endpoint DELAY mode to prevent sending data.
		 * Flow control is disabled for newly-allocated channels,
		 * and we can assume flow control is not (ever) enabled
		 * for AP TX channels.
		 */
		if (endpoint->ipa->version < IPA_VERSION_4_2)
			ipa_endpoint_program_delay(endpoint, false);
	} else {
		/* Ensure suspend mode is off on all AP RX endpoints */
		(void)ipa_endpoint_program_suspend(endpoint, false);
	}
	ipa_endpoint_init_cfg(endpoint);
	ipa_endpoint_init_nat(endpoint);
	ipa_endpoint_init_hdr(endpoint);
	ipa_endpoint_init_hdr_ext(endpoint);
	ipa_endpoint_init_hdr_metadata_mask(endpoint);
	ipa_endpoint_init_mode(endpoint);
	ipa_endpoint_init_aggr(endpoint);
	if (!endpoint->toward_ipa) {
		if (endpoint->config.rx.holb_drop)
			ipa_endpoint_init_hol_block_enable(endpoint, 0);
		else
			ipa_endpoint_init_hol_block_disable(endpoint);
	}
	ipa_endpoint_init_deaggr(endpoint);
	ipa_endpoint_init_rsrc_grp(endpoint);
	ipa_endpoint_init_seq(endpoint);
	ipa_endpoint_status(endpoint);
}
int ipa_endpoint_enable_one(struct ipa_endpoint *endpoint)
{
	struct ipa *ipa = endpoint->ipa;
	struct gsi *gsi = &ipa->gsi;
	int ret;

	ret = gsi_channel_start(gsi, endpoint->channel_id);
	if (ret) {
		dev_err(&ipa->pdev->dev,
			"error %d starting %cX channel %u for endpoint %u\n",
			ret, endpoint->toward_ipa ? 'T' : 'R',
			endpoint->channel_id, endpoint->endpoint_id);
		return ret;
	}

	if (!endpoint->toward_ipa) {
		ipa_interrupt_suspend_enable(ipa->interrupt,
					     endpoint->endpoint_id);
		ipa_endpoint_replenish_enable(endpoint);
	}

	ipa->enabled |= BIT(endpoint->endpoint_id);

	return 0;
}
void ipa_endpoint_disable_one(struct ipa_endpoint *endpoint)
{
	u32 mask = BIT(endpoint->endpoint_id);
	struct ipa *ipa = endpoint->ipa;
	struct gsi *gsi = &ipa->gsi;
	int ret;

	if (!(ipa->enabled & mask))
		return;

	ipa->enabled ^= mask;

	if (!endpoint->toward_ipa) {
		ipa_endpoint_replenish_disable(endpoint);
		ipa_interrupt_suspend_disable(ipa->interrupt,
					      endpoint->endpoint_id);
	}

	/* Note that if stop fails, the channel's state is not well-defined */
	ret = gsi_channel_stop(gsi, endpoint->channel_id);
	if (ret)
		dev_err(&ipa->pdev->dev,
			"error %d attempting to stop endpoint %u\n", ret,
			endpoint->endpoint_id);
}
void ipa_endpoint_suspend_one(struct ipa_endpoint *endpoint)
{
	struct device *dev = &endpoint->ipa->pdev->dev;
	struct gsi *gsi = &endpoint->ipa->gsi;
	int ret;

	if (!(endpoint->ipa->enabled & BIT(endpoint->endpoint_id)))
		return;

	if (!endpoint->toward_ipa) {
		ipa_endpoint_replenish_disable(endpoint);
		(void)ipa_endpoint_program_suspend(endpoint, true);
	}

	ret = gsi_channel_suspend(gsi, endpoint->channel_id);
	if (ret)
		dev_err(dev, "error %d suspending channel %u\n", ret,
			endpoint->channel_id);
}
void ipa_endpoint_resume_one(struct ipa_endpoint *endpoint)
{
	struct device *dev = &endpoint->ipa->pdev->dev;
	struct gsi *gsi = &endpoint->ipa->gsi;
	int ret;

	if (!(endpoint->ipa->enabled & BIT(endpoint->endpoint_id)))
		return;

	if (!endpoint->toward_ipa)
		(void)ipa_endpoint_program_suspend(endpoint, false);

	ret = gsi_channel_resume(gsi, endpoint->channel_id);
	if (ret)
		dev_err(dev, "error %d resuming channel %u\n", ret,
			endpoint->channel_id);
	else if (!endpoint->toward_ipa)
		ipa_endpoint_replenish_enable(endpoint);
}
void ipa_endpoint_suspend(struct ipa *ipa)
{
	if (!ipa->setup_complete)
		return;

	if (ipa->modem_netdev)
		ipa_modem_suspend(ipa->modem_netdev);

	ipa_endpoint_suspend_one(ipa->name_map[IPA_ENDPOINT_AP_LAN_RX]);
	ipa_endpoint_suspend_one(ipa->name_map[IPA_ENDPOINT_AP_COMMAND_TX]);
}

void ipa_endpoint_resume(struct ipa *ipa)
{
	if (!ipa->setup_complete)
		return;

	ipa_endpoint_resume_one(ipa->name_map[IPA_ENDPOINT_AP_COMMAND_TX]);
	ipa_endpoint_resume_one(ipa->name_map[IPA_ENDPOINT_AP_LAN_RX]);

	if (ipa->modem_netdev)
		ipa_modem_resume(ipa->modem_netdev);
}
static void ipa_endpoint_setup_one(struct ipa_endpoint *endpoint)
{
	struct gsi *gsi = &endpoint->ipa->gsi;
	u32 channel_id = endpoint->channel_id;

	/* Only AP endpoints get set up */
	if (endpoint->ee_id != GSI_EE_AP)
		return;

	endpoint->skb_frag_max = gsi->channel[channel_id].trans_tre_max - 1;
	if (!endpoint->toward_ipa) {
		/* RX transactions require a single TRE, so the maximum
		 * backlog is the same as the maximum outstanding TREs.
		 */
		clear_bit(IPA_REPLENISH_ENABLED, endpoint->replenish_flags);
		clear_bit(IPA_REPLENISH_ACTIVE, endpoint->replenish_flags);
		INIT_DELAYED_WORK(&endpoint->replenish_work,
				  ipa_endpoint_replenish_work);
	}

	ipa_endpoint_program(endpoint);

	endpoint->ipa->set_up |= BIT(endpoint->endpoint_id);
}
static void ipa_endpoint_teardown_one(struct ipa_endpoint *endpoint)
{
	endpoint->ipa->set_up &= ~BIT(endpoint->endpoint_id);

	if (!endpoint->toward_ipa)
		cancel_delayed_work_sync(&endpoint->replenish_work);

	ipa_endpoint_reset(endpoint);
}
void ipa_endpoint_setup(struct ipa *ipa)
{
	u32 initialized = ipa->initialized;

	ipa->set_up = 0;
	while (initialized) {
		u32 endpoint_id = __ffs(initialized);

		initialized ^= BIT(endpoint_id);

		ipa_endpoint_setup_one(&ipa->endpoint[endpoint_id]);
	}
}

void ipa_endpoint_teardown(struct ipa *ipa)
{
	u32 set_up = ipa->set_up;

	while (set_up) {
		u32 endpoint_id = __fls(set_up);

		set_up ^= BIT(endpoint_id);

		ipa_endpoint_teardown_one(&ipa->endpoint[endpoint_id]);
	}
	ipa->set_up = 0;
}
int ipa_endpoint_config(struct ipa *ipa)
{
	struct device *dev = &ipa->pdev->dev;
	u32 initialized;
	u32 rx_base;
	u32 rx_mask;
	u32 tx_mask;
	int ret = 0;
	u32 max;
	u32 val;

	/* Prior to IPA v3.5, the FLAVOR_0 register was not supported.
	 * Furthermore, the endpoints were not grouped such that TX
	 * endpoint numbers started with 0 and RX endpoints had numbers
	 * higher than all TX endpoints, so we can't do the simple
	 * direction check used for newer hardware below.
	 *
	 * For hardware that doesn't support the FLAVOR_0 register,
	 * just set the available mask to support any endpoint, and
	 * assume the configuration is valid.
	 */
	if (ipa->version < IPA_VERSION_3_5) {
		ipa->available = ~0;
		return 0;
	}

	/* Find out about the endpoints supplied by the hardware, and ensure
	 * the highest one doesn't exceed the number we support.
	 */
	val = ioread32(ipa->reg_virt + IPA_REG_FLAVOR_0_OFFSET);

	/* Our RX is an IPA producer */
	rx_base = u32_get_bits(val, IPA_PROD_LOWEST_FMASK);
	max = rx_base + u32_get_bits(val, IPA_MAX_PROD_PIPES_FMASK);
	if (max > IPA_ENDPOINT_MAX) {
		dev_err(dev, "too many endpoints (%u > %u)\n",
			max, IPA_ENDPOINT_MAX);
		return -EINVAL;
	}
	rx_mask = GENMASK(max - 1, rx_base);

	/* Our TX is an IPA consumer */
	max = u32_get_bits(val, IPA_MAX_CONS_PIPES_FMASK);
	tx_mask = GENMASK(max - 1, 0);

	ipa->available = rx_mask | tx_mask;

	/* Check for initialized endpoints not supported by the hardware */
	if (ipa->initialized & ~ipa->available) {
		dev_err(dev, "unavailable endpoint id(s) 0x%08x\n",
			ipa->initialized & ~ipa->available);
		ret = -EINVAL;		/* Report other errors too */
	}

	initialized = ipa->initialized;
	while (initialized) {
		u32 endpoint_id = __ffs(initialized);
		struct ipa_endpoint *endpoint;

		initialized ^= BIT(endpoint_id);

		/* Make sure it's pointing in the right direction */
		endpoint = &ipa->endpoint[endpoint_id];
		if ((endpoint_id < rx_base) != endpoint->toward_ipa) {
			dev_err(dev, "endpoint id %u wrong direction\n",
				endpoint_id);
			ret = -EINVAL;
		}
	}

	return ret;
}
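/* Mask example (illustrative): if FLAVOR_0 reported rx_base = 8 with 8
 * producer (RX) pipes and 8 consumer (TX) pipes, then rx_mask would be
 * GENMASK(15, 8) = 0xff00, tx_mask would be GENMASK(7, 0) = 0x00ff, and
 * ipa->available would cover endpoint IDs 0-15.
 */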
void ipa_endpoint_deconfig(struct ipa *ipa)
{
	ipa->available = 0;	/* Nothing more to do */
}
static void ipa_endpoint_init_one(struct ipa *ipa, enum ipa_endpoint_name name,
				  const struct ipa_gsi_endpoint_data *data)
{
	struct ipa_endpoint *endpoint;

	endpoint = &ipa->endpoint[data->endpoint_id];

	if (data->ee_id == GSI_EE_AP)
		ipa->channel_map[data->channel_id] = endpoint;
	ipa->name_map[name] = endpoint;

	endpoint->ipa = ipa;
	endpoint->ee_id = data->ee_id;
	endpoint->channel_id = data->channel_id;
	endpoint->endpoint_id = data->endpoint_id;
	endpoint->toward_ipa = data->toward_ipa;
	endpoint->config = data->endpoint.config;

	ipa->initialized |= BIT(endpoint->endpoint_id);
}
static void ipa_endpoint_exit_one(struct ipa_endpoint *endpoint)
{
	endpoint->ipa->initialized &= ~BIT(endpoint->endpoint_id);

	memset(endpoint, 0, sizeof(*endpoint));
}
void ipa_endpoint_exit(struct ipa *ipa)
{
	u32 initialized = ipa->initialized;

	while (initialized) {
		u32 endpoint_id = __fls(initialized);

		initialized ^= BIT(endpoint_id);

		ipa_endpoint_exit_one(&ipa->endpoint[endpoint_id]);
	}
	memset(ipa->name_map, 0, sizeof(ipa->name_map));
	memset(ipa->channel_map, 0, sizeof(ipa->channel_map));
}
/* Returns a bitmask of endpoints that support filtering, or 0 on error */
u32 ipa_endpoint_init(struct ipa *ipa, u32 count,
		      const struct ipa_gsi_endpoint_data *data)
{
	enum ipa_endpoint_name name;
	u32 filter_map;

	BUILD_BUG_ON(!IPA_REPLENISH_BATCH);

	if (!ipa_endpoint_data_valid(ipa, count, data))
		return 0;	/* Error */

	ipa->initialized = 0;

	filter_map = 0;
	for (name = 0; name < count; name++, data++) {
		if (ipa_gsi_endpoint_data_empty(data))
			continue;	/* Skip over empty slots */

		ipa_endpoint_init_one(ipa, name, data);

		if (data->endpoint.filter_support)
			filter_map |= BIT(data->endpoint_id);
		if (data->ee_id == GSI_EE_MODEM && data->toward_ipa)
			ipa->modem_tx_count++;
	}

	if (!ipa_filter_map_valid(ipa, filter_map))
		goto err_endpoint_exit;

	return filter_map;	/* Non-zero bitmask */

err_endpoint_exit:
	ipa_endpoint_exit(ipa);

	return 0;	/* Error */
}