// SPDX-License-Identifier: GPL-2.0
/* Marvell MCS driver
 *
 * Copyright (C) 2022 Marvell.
 */

#include "mcs.h"
#include "mcs_reg.h"
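
/* CNF10K-B specific MCS programming is exposed through this ops table so
 * the common AF code can stay chip-agnostic; the table is selected at
 * probe time when a CNF10K-B device is detected.
 */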
static struct mcs_ops cnf10kb_mcs_ops = {
	.mcs_set_hw_capabilities	= cnf10kb_mcs_set_hw_capabilities,
	.mcs_parser_cfg			= cnf10kb_mcs_parser_cfg,
	.mcs_tx_sa_mem_map_write	= cnf10kb_mcs_tx_sa_mem_map_write,
	.mcs_rx_sa_mem_map_write	= cnf10kb_mcs_rx_sa_mem_map_write,
	.mcs_flowid_secy_map		= cnf10kb_mcs_flowid_secy_map,
	.mcs_bbe_intr_handler		= cnf10kb_mcs_bbe_intr_handler,
	.mcs_pab_intr_handler		= cnf10kb_mcs_pab_intr_handler,
};
struct mcs_ops *cnf10kb_get_mac_ops(void)
{
	return &cnf10kb_mcs_ops;
}
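
/* Per-block hardware resource sizes for CNF10K-B. The common AF code uses
 * these to size its resource bitmaps (e.g. the tx.sc map walked by the
 * interrupt handlers below), so they must match the silicon.
 */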
void cnf10kb_mcs_set_hw_capabilities(struct mcs *mcs)
{
	struct hwinfo *hw = mcs->hw;

	hw->tcam_entries = 64;		/* TCAM entries */
	hw->secy_entries = 64;		/* SecY entries */
	hw->sc_entries = 64;		/* SC CAM entries */
	hw->sa_entries = 128;		/* SA entries */
	hw->lmac_cnt = 4;		/* LMACs/ports per MCS block */
	hw->mcs_x2p_intf = 1;		/* X2P calibration interface */
	hw->mcs_blks = 7;		/* MCS blocks */
	hw->ip_vec = MCS_CNF10KB_INT_VEC_IP; /* IP vector */
}
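
/* Program the PEX parser to recognize VLAN tags: custom tag 0 matches the
 * CTag ethertype (0x8100) and custom tag 1 the STag ethertype (0x88a8).
 * Bits [15:0] of each CUSTOM_TAGX register hold the ethertype; the other
 * bits set below are tag-handling controls whose exact semantics follow
 * the CNF10K-B register manual.
 */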
void cnf10kb_mcs_parser_cfg(struct mcs *mcs)
{
	u64 reg, val;

	/* VLAN CTag */
	val = (0x8100ull & 0xFFFF) | BIT_ULL(20) | BIT_ULL(22);

	reg = MCSX_PEX_RX_SLAVE_CUSTOM_TAGX(0);
	mcs_reg_write(mcs, reg, val);

	reg = MCSX_PEX_TX_SLAVE_CUSTOM_TAGX(0);
	mcs_reg_write(mcs, reg, val);

	/* VLAN STag */
	val = (0x88a8ull & 0xFFFF) | BIT_ULL(20) | BIT_ULL(23);

	reg = MCSX_PEX_RX_SLAVE_CUSTOM_TAGX(1);
	mcs_reg_write(mcs, reg, val);

	reg = MCSX_PEX_TX_SLAVE_CUSTOM_TAGX(1);
	mcs_reg_write(mcs, reg, val);

	/* Enable custom tags 0 and 1 and the MACsec SecTag */
	val = BIT_ULL(0) | BIT_ULL(1) | BIT_ULL(12);

	reg = MCSX_PEX_RX_SLAVE_ETYPE_ENABLE;
	mcs_reg_write(mcs, reg, val);

	reg = MCSX_PEX_TX_SLAVE_ETYPE_ENABLE;
	mcs_reg_write(mcs, reg, val);
}
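
/* Map a flow_id (TCAM match index) to a SecY. Layout as programmed below:
 * RX SECY_MAP_MEMX and TX SECY_MAP_MEM_1X take secy in [5:0] and ctrl_pkt
 * in [6], with TX additionally carrying sc in [12:7]; TX SECY_MAP_MEM_0X
 * holds the 64-bit SCI.
 */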
void cnf10kb_mcs_flowid_secy_map(struct mcs *mcs, struct secy_mem_map *map, int dir)
{
	u64 reg, val;

	val = (map->secy & 0x3F) | (map->ctrl_pkt & 0x1) << 6;
	if (dir == MCS_RX) {
		reg = MCSX_CPM_RX_SLAVE_SECY_MAP_MEMX(map->flow_id);
	} else {
		reg = MCSX_CPM_TX_SLAVE_SECY_MAP_MEM_0X(map->flow_id);
		mcs_reg_write(mcs, reg, map->sci);
		val |= (map->sc & 0x3F) << 7;
		reg = MCSX_CPM_TX_SLAVE_SECY_MAP_MEM_1X(map->flow_id);
	}

	mcs_reg_write(mcs, reg, val);
}
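
/* Program the TX SC to SA mapping. SA_MAP_MEM_0X packs the two SAs that
 * can serve one SC: sa_index0 in [6:0] and sa_index1 in [13:7]. Auto
 * rekey is a per-SC bit in AUTO_REKEY_ENABLE_0, and the INDEX*_VLDX and
 * TX_SA_ACTIVEX registers are likewise indexed by SC.
 */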
void cnf10kb_mcs_tx_sa_mem_map_write(struct mcs *mcs, struct mcs_tx_sc_sa_map *map)
{
	u64 reg, val;

	val = (map->sa_index0 & 0x7F) | (map->sa_index1 & 0x7F) << 7;

	reg = MCSX_CPM_TX_SLAVE_SA_MAP_MEM_0X(map->sc_id);
	mcs_reg_write(mcs, reg, val);

	reg = MCSX_CPM_TX_SLAVE_AUTO_REKEY_ENABLE_0;
	val = mcs_reg_read(mcs, reg);

	if (map->rekey_ena)
		val |= BIT_ULL(map->sc_id);
	else
		val &= ~BIT_ULL(map->sc_id);

	mcs_reg_write(mcs, reg, val);

	mcs_reg_write(mcs, MCSX_CPM_TX_SLAVE_SA_INDEX0_VLDX(map->sc_id), map->sa_index0_vld);
	mcs_reg_write(mcs, MCSX_CPM_TX_SLAVE_SA_INDEX1_VLDX(map->sc_id), map->sa_index1_vld);

	mcs_reg_write(mcs, MCSX_CPM_TX_SLAVE_TX_SA_ACTIVEX(map->sc_id), map->tx_sa_active);
}
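
/* Program the RX SC to SA mapping. RX keeps one SA per (SC, AN) pair and
 * MACsec defines four ANs per SC (2-bit AN), hence the (4 * sc_id) + an
 * indexing. Each entry packs sa_index in [6:0] and sa_in_use in bit 7.
 */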
void cnf10kb_mcs_rx_sa_mem_map_write(struct mcs *mcs, struct mcs_rx_sc_sa_map *map)
{
	u64 val, reg;

	val = (map->sa_index & 0x7F) | (map->sa_in_use << 7);

	reg = MCSX_CPM_RX_SLAVE_SA_MAP_MEMX((4 * map->sc_id) + map->an);
	mcs_reg_write(mcs, reg, val);
}
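
/* Force the MCS clock on (or release it) so that CSR state such as
 * statistics can be read reliably. When enabling, poll for up to 2ms for
 * the IP to report ready before giving up.
 */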
int mcs_set_force_clk_en(struct mcs *mcs, bool set)
{
	unsigned long timeout = jiffies + usecs_to_jiffies(2000);
	u64 val;

	val = mcs_reg_read(mcs, MCSX_MIL_GLOBAL);

	if (set) {
		val |= BIT_ULL(4); /* force clock enable */
		mcs_reg_write(mcs, MCSX_MIL_GLOBAL, val);

		/* Poll till mcsx_mil_ip_gbl_status.mcs_ip_stats_ready value is 1 */
		while (!(mcs_reg_read(mcs, MCSX_MIL_IP_GBL_STATUS) & BIT_ULL(0))) {
			if (time_after(jiffies, timeout)) {
				dev_err(mcs->dev, "MCS set force clk enable failed\n");
				break;
			}
		}
	} else {
		val &= ~BIT_ULL(4);
		mcs_reg_write(mcs, MCSX_MIL_GLOBAL, val);
	}

	return 0;
}
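
/* A typical caller brackets a burst of stats CSR reads (a sketch, with
 * "reg" standing in for whichever stats CSR is of interest):
 *
 *	mcs_set_force_clk_en(mcs, true);
 *	val = mcs_reg_read(mcs, reg);
 *	mcs_set_force_clk_en(mcs, false);
 */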
/* TX SA interrupt is raised only if autorekey is enabled.
 * MCS_CPM_TX_SLAVE_SA_MAP_MEM_0X[sc].tx_sa_active bit gets toggled when
 * one of the two SAs mapped to the SC expires. tx_sa_active=0 implies
 * the SA at SA_index1 expired; otherwise the SA at SA_index0 expired.
 */
void cnf10kb_mcs_tx_pn_thresh_reached_handler(struct mcs *mcs)
{
	struct mcs_intr_event event;
	struct rsrc_bmap *sc_bmap;
	unsigned long rekey_ena;
	u64 val, sa_status;
	int sc;

	sc_bmap = &mcs->tx.sc;

	event.mcs_id = mcs->mcs_id;
	event.intr_mask = MCS_CPM_TX_PN_THRESH_REACHED_INT;

	rekey_ena = mcs_reg_read(mcs, MCSX_CPM_TX_SLAVE_AUTO_REKEY_ENABLE_0);

	for_each_set_bit(sc, sc_bmap->bmap, mcs->hw->sc_entries) {
		/* Skip SCs on which auto rekey is not enabled */
		if (!test_bit(sc, &rekey_ena))
			continue;

		sa_status = mcs_reg_read(mcs, MCSX_CPM_TX_SLAVE_TX_SA_ACTIVEX(sc));
		/* Check if the tx_sa_active status has changed */
		if (sa_status == mcs->tx_sa_active[sc])
			continue;

		val = mcs_reg_read(mcs, MCSX_CPM_TX_SLAVE_SA_MAP_MEM_0X(sc));
		if (sa_status)
			/* Toggled to 1: SA at SA_index0 expired */
			event.sa_id = val & 0x7F;
		else
			/* Toggled to 0: SA at SA_index1 expired */
			event.sa_id = (val >> 7) & 0x7F;

		event.pcifunc = mcs->tx.sa2pf_map[event.sa_id];
		mcs_add_intr_wq_entry(mcs, &event);
	}
}
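
/* Unlike the threshold interrupt above, which fires while the PN is still
 * approaching exhaustion so software has time to rekey, this handler runs
 * once a packet has gone out with an (X)PN of zero, i.e. the PN space of
 * the active SA has already wrapped.
 */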
void cnf10kb_mcs_tx_pn_wrapped_handler(struct mcs *mcs)
{
	struct mcs_intr_event event = { 0 };
	struct rsrc_bmap *sc_bmap;
	u64 val;
	int sc;

	sc_bmap = &mcs->tx.sc;

	event.mcs_id = mcs->mcs_id;
	event.intr_mask = MCS_CPM_TX_PACKET_XPN_EQ0_INT;

	for_each_set_bit(sc, sc_bmap->bmap, mcs->hw->sc_entries) {
		val = mcs_reg_read(mcs, MCSX_CPM_TX_SLAVE_SA_MAP_MEM_0X(sc));

		if (mcs->tx_sa_active[sc])
			/* SA_index1 was in use and has expired */
			event.sa_id = (val >> 7) & 0x7F;
		else
			/* SA_index0 was in use and has expired */
			event.sa_id = val & 0x7F;

		event.pcifunc = mcs->tx.sa2pf_map[event.sa_id];
		mcs_add_intr_wq_entry(mcs, &event);
	}
}
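
/* BBE fatal interrupt handler. The interrupt bits cover the four LMACs
 * twice over: the lower nibble flags data FIFO overflows and the upper
 * nibble policy FIFO overflows, hence lmac_id = i & 0x3 below.
 */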
void cnf10kb_mcs_bbe_intr_handler(struct mcs *mcs, u64 intr,
				  enum mcs_direction dir)
{
	struct mcs_intr_event event = { 0 };
	int i;

	if (!(intr & MCS_BBE_INT_MASK))
		return;

	event.mcs_id = mcs->mcs_id;
	event.pcifunc = mcs->pf_map[0];

	for (i = 0; i < MCS_MAX_BBE_INT; i++) {
		if (!(intr & BIT_ULL(i)))
			continue;

		/* Lower nibble denotes data fifo overflow interrupts and
		 * upper nibble indicates policy fifo overflow interrupts.
		 */
		if (intr & 0xFULL)
			event.intr_mask = (dir == MCS_RX) ?
					  MCS_BBE_RX_DFIFO_OVERFLOW_INT :
					  MCS_BBE_TX_DFIFO_OVERFLOW_INT;
		else
			event.intr_mask = (dir == MCS_RX) ?
					  MCS_BBE_RX_PLFIFO_OVERFLOW_INT :
					  MCS_BBE_TX_PLFIFO_OVERFLOW_INT;

		/* Notify the lmac_id which ran into a BBE fatal error */
		event.lmac_id = i & 0x3ULL;
		mcs_add_intr_wq_entry(mcs, &event);
	}
}
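
/* PAB fatal interrupt handler. Each interrupt bit is assumed to map
 * directly to one overflowed channel, so the bit position is reported as
 * the lmac_id.
 */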
void cnf10kb_mcs_pab_intr_handler(struct mcs *mcs, u64 intr,
				  enum mcs_direction dir)
{
	struct mcs_intr_event event = { 0 };
	int i;

	if (!(intr & MCS_PAB_INT_MASK))
		return;

	event.mcs_id = mcs->mcs_id;
	event.pcifunc = mcs->pf_map[0];

	for (i = 0; i < MCS_MAX_PAB_INT; i++) {
		if (!(intr & BIT_ULL(i)))
			continue;

		event.intr_mask = (dir == MCS_RX) ?
				  MCS_PAB_RX_CHAN_OVERFLOW_INT :
				  MCS_PAB_TX_CHAN_OVERFLOW_INT;

		/* Notify the lmac_id which ran into a PAB fatal error */
		event.lmac_id = i;
		mcs_add_intr_wq_entry(mcs, &event);
	}
}