/* Freescale QUICC Engine HDLC Device Driver
 *
 * Copyright 2016 Freescale Semiconductor Inc.
 *
 * This program is free software; you can redistribute  it and/or modify it
 * under  the terms of  the GNU General  Public License as published by the
 * Free Software Foundation;  either version 2 of the  License, or (at your
 * option) any later version.
 */

#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/hdlc.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/irq.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/netdevice.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
#include <linux/of_platform.h>
#include <linux/platform_device.h>
#include <linux/sched.h>
#include <linux/skbuff.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/stddef.h>
#include <soc/fsl/qe/qe_tdm.h>
#include <uapi/linux/if_arp.h>

#include "fsl_ucc_hdlc.h"

#define DRV_DESC "Freescale QE UCC HDLC Driver"
#define DRV_NAME "ucc_hdlc"

#define TDM_PPPOHT_SLIC_MAXIN

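/* When TDM_PPPOHT_SLIC_MAXIN is defined, the SI in utdm_primary_info below
 * is configured with non-zero receive/transmit frame-sync delays
 * (simr_rfsd = 1, simr_tfsd = 2) instead of zero delays.
 */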
static struct ucc_tdm_info utdm_primary_info = {
        .uf_info = {
                .tsa = 0,
                .cdp = 0,
                .cds = 1,
                .ctsp = 1,
                .ctss = 1,
                .revd = 0,
                .urfs = 256,
                .utfs = 256,
                .urfet = 128,
                .urfset = 192,
                .utfet = 128,
                .utftt = 0x40,
                .ufpt = 256,
                .mode = UCC_FAST_PROTOCOL_MODE_HDLC,
                .ttx_trx = UCC_FAST_GUMR_TRANSPARENT_TTX_TRX_NORMAL,
                .tenc = UCC_FAST_TX_ENCODING_NRZ,
                .renc = UCC_FAST_RX_ENCODING_NRZ,
                .tcrc = UCC_FAST_16_BIT_CRC,
                .synl = UCC_FAST_SYNC_LEN_NOT_USED,
        },

        .si_info = {
#ifdef TDM_PPPOHT_SLIC_MAXIN
                .simr_rfsd = 1,
                .simr_tfsd = 2,
#else
                .simr_rfsd = 0,
                .simr_tfsd = 0,
#endif
                .simr_crt = 0,
                .simr_sl = 0,
                .simr_ce = 1,
                .simr_fe = 1,
                .simr_gm = 0,
        },
};

static struct ucc_tdm_info utdm_info[MAX_HDLC_NUM];

static int uhdlc_init(struct ucc_hdlc_private *priv)
{
        struct ucc_tdm_info *ut_info;
        struct ucc_fast_info *uf_info;
        u32 cecr_subblock;
        u16 bd_status;
        int ret, i;
        void *bd_buffer;
        dma_addr_t bd_dma_addr;
        s32 riptr;      /* signed: the qe_muram_alloc() results are checked with "< 0" below */
        s32 tiptr;
        u32 gumr;

        ut_info = priv->ut_info;
        uf_info = &ut_info->uf_info;

        if (priv->tsa) {
                uf_info->tsa = 1;
                uf_info->ctsp = 1;
        }

        /* This sets the HPM register in the CMXUCR register, which
         * configures an open-drain connected HDLC bus
         */
        if (priv->hdlc_bus)
                uf_info->brkpt_support = 1;

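        /* The HDLC event bits (RXB/RXF/TXB) occupy the upper 16 bits of the
         * 32-bit UCCE/UCCM registers, hence the shift; the IRQ handler below
         * shifts UCCE right by 16 before testing the same masks.
         */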
        uf_info->uccm_mask = ((UCC_HDLC_UCCE_RXB | UCC_HDLC_UCCE_RXF |
                                UCC_HDLC_UCCE_TXB) << 16);

        ret = ucc_fast_init(uf_info, &priv->uccf);
        if (ret) {
                dev_err(priv->dev, "Failed to init uccf\n");
                return ret;
        }

        priv->uf_regs = priv->uccf->uf_regs;
        ucc_fast_disable(priv->uccf, COMM_DIR_RX | COMM_DIR_TX);

        /* Loopback mode */
        if (priv->loopback) {
                dev_info(priv->dev, "Loopback Mode\n");
                /* use the same clock when working in loopback */
                qe_setbrg(ut_info->uf_info.rx_clock, 20000000, 1);

                gumr = ioread32be(&priv->uf_regs->gumr);
                gumr |= (UCC_FAST_GUMR_LOOPBACK | UCC_FAST_GUMR_CDS |
                         UCC_FAST_GUMR_TCI);
                gumr &= ~(UCC_FAST_GUMR_CTSP | UCC_FAST_GUMR_RSYN);
                iowrite32be(gumr, &priv->uf_regs->gumr);
        }

        /* Initialize SI */
        if (priv->tsa)
                ucc_tdm_init(priv->utdm, priv->ut_info);

        /* Write to QE CECR, UCCx channel to Stop Transmission */
        cecr_subblock = ucc_fast_get_qe_cr_subblock(uf_info->ucc_num);
        ret = qe_issue_cmd(QE_STOP_TX, cecr_subblock,
                           QE_CR_PROTOCOL_UNSPECIFIED, 0);

        /* Set UPSMR normal mode (needs fixing) */
        iowrite32be(0, &priv->uf_regs->upsmr);

        /* hdlc_bus mode */
        if (priv->hdlc_bus) {
                u32 upsmr;

                dev_info(priv->dev, "HDLC bus Mode\n");
                upsmr = ioread32be(&priv->uf_regs->upsmr);

                /* bus mode and retransmit enable, with collision window
                 * set to 8 bytes
                 */
                upsmr |= UCC_HDLC_UPSMR_RTE | UCC_HDLC_UPSMR_BUS |
                                UCC_HDLC_UPSMR_CW8;
                iowrite32be(upsmr, &priv->uf_regs->upsmr);

                /* explicitly disable CDS & CTSP */
                gumr = ioread32be(&priv->uf_regs->gumr);
                gumr &= ~(UCC_FAST_GUMR_CDS | UCC_FAST_GUMR_CTSP);
                /* set automatic sync to explicitly ignore CD signal */
                gumr |= UCC_FAST_GUMR_SYNL_AUTO;
                iowrite32be(gumr, &priv->uf_regs->gumr);
        }

        priv->rx_ring_size = RX_BD_RING_LEN;
        priv->tx_ring_size = TX_BD_RING_LEN;
        /* Alloc Rx BD */
        priv->rx_bd_base = dma_alloc_coherent(priv->dev,
                        RX_BD_RING_LEN * sizeof(struct qe_bd),
                        &priv->dma_rx_bd, GFP_KERNEL);

        if (!priv->rx_bd_base) {
                dev_err(priv->dev, "Cannot allocate MURAM memory for RxBDs\n");
                ret = -ENOMEM;
                goto free_uccf;
        }

        /* Alloc Tx BD */
        priv->tx_bd_base = dma_alloc_coherent(priv->dev,
                        TX_BD_RING_LEN * sizeof(struct qe_bd),
                        &priv->dma_tx_bd, GFP_KERNEL);

        if (!priv->tx_bd_base) {
                dev_err(priv->dev, "Cannot allocate MURAM memory for TxBDs\n");
                ret = -ENOMEM;
                goto free_rx_bd;
        }

        /* Alloc parameter ram for ucc hdlc */
        priv->ucc_pram_offset = qe_muram_alloc(sizeof(struct ucc_hdlc_param),
                                ALIGNMENT_OF_UCC_HDLC_PRAM);

        if (priv->ucc_pram_offset < 0) {
                dev_err(priv->dev, "Cannot allocate MURAM for hdlc parameter\n");
                ret = -ENOMEM;
                goto free_tx_bd;
        }

        priv->rx_skbuff = kcalloc(priv->rx_ring_size,
                                  sizeof(*priv->rx_skbuff),
                                  GFP_KERNEL);
        if (!priv->rx_skbuff) {
                ret = -ENOMEM;
                goto free_ucc_pram;
        }

        priv->tx_skbuff = kcalloc(priv->tx_ring_size,
                                  sizeof(*priv->tx_skbuff),
                                  GFP_KERNEL);
        if (!priv->tx_skbuff) {
                ret = -ENOMEM;
                goto free_rx_skbuff;
        }

        priv->skb_curtx = 0;
        priv->skb_dirtytx = 0;
        priv->curtx_bd = priv->tx_bd_base;
        priv->dirty_tx = priv->tx_bd_base;
        priv->currx_bd = priv->rx_bd_base;
        priv->currx_bdnum = 0;

        /* init parameter base */
        cecr_subblock = ucc_fast_get_qe_cr_subblock(uf_info->ucc_num);
        ret = qe_issue_cmd(QE_ASSIGN_PAGE_TO_DEVICE, cecr_subblock,
                           QE_CR_PROTOCOL_UNSPECIFIED, priv->ucc_pram_offset);

        priv->ucc_pram = (struct ucc_hdlc_param __iomem *)
                                        qe_muram_addr(priv->ucc_pram_offset);

        /* Zero out parameter ram */
        memset_io(priv->ucc_pram, 0, sizeof(struct ucc_hdlc_param));

        /* Alloc riptr, tiptr */
        riptr = qe_muram_alloc(32, 32);
        if (riptr < 0) {
                dev_err(priv->dev, "Cannot allocate MURAM mem for Receive internal temp data pointer\n");
                ret = -ENOMEM;
                goto free_tx_skbuff;
        }

        tiptr = qe_muram_alloc(32, 32);
        if (tiptr < 0) {
                dev_err(priv->dev, "Cannot allocate MURAM mem for Transmit internal temp data pointer\n");
                ret = -ENOMEM;
                goto free_riptr;
        }

        /* Set RIPTR, TIPTR */
        iowrite16be(riptr, &priv->ucc_pram->riptr);
        iowrite16be(tiptr, &priv->ucc_pram->tiptr);

        /* Set MRBLR */
        iowrite16be(MAX_RX_BUF_LENGTH, &priv->ucc_pram->mrblr);

        /* Set RBASE, TBASE */
        iowrite32be(priv->dma_rx_bd, &priv->ucc_pram->rbase);
        iowrite32be(priv->dma_tx_bd, &priv->ucc_pram->tbase);

        /* Set RSTATE, TSTATE */
        iowrite32be(BMR_GBL | BMR_BIG_ENDIAN, &priv->ucc_pram->rstate);
        iowrite32be(BMR_GBL | BMR_BIG_ENDIAN, &priv->ucc_pram->tstate);

        /* Set C_MASK, C_PRES for 16bit CRC */
        iowrite32be(CRC_16BIT_MASK, &priv->ucc_pram->c_mask);
        iowrite32be(CRC_16BIT_PRES, &priv->ucc_pram->c_pres);

        iowrite16be(MAX_FRAME_LENGTH, &priv->ucc_pram->mflr);
        iowrite16be(DEFAULT_RFTHR, &priv->ucc_pram->rfthr);
        iowrite16be(DEFAULT_RFTHR, &priv->ucc_pram->rfcnt);
        iowrite16be(DEFAULT_ADDR_MASK, &priv->ucc_pram->hmask);
        iowrite16be(DEFAULT_HDLC_ADDR, &priv->ucc_pram->haddr1);
        iowrite16be(DEFAULT_HDLC_ADDR, &priv->ucc_pram->haddr2);
        iowrite16be(DEFAULT_HDLC_ADDR, &priv->ucc_pram->haddr3);
        iowrite16be(DEFAULT_HDLC_ADDR, &priv->ucc_pram->haddr4);

        /* Get BD buffer */
        bd_buffer = dma_alloc_coherent(priv->dev,
                                       (RX_BD_RING_LEN + TX_BD_RING_LEN) *
                                       MAX_RX_BUF_LENGTH,
                                       &bd_dma_addr, GFP_KERNEL);

        if (!bd_buffer) {
                dev_err(priv->dev, "Could not allocate buffer descriptors\n");
                ret = -ENOMEM;
                goto free_tiptr;
        }

        memset(bd_buffer, 0, (RX_BD_RING_LEN + TX_BD_RING_LEN)
                        * MAX_RX_BUF_LENGTH);

        priv->rx_buffer = bd_buffer;
        priv->tx_buffer = bd_buffer + RX_BD_RING_LEN * MAX_RX_BUF_LENGTH;

        priv->dma_rx_addr = bd_dma_addr;
        priv->dma_tx_addr = bd_dma_addr + RX_BD_RING_LEN * MAX_RX_BUF_LENGTH;

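        /* Seed both BD rings: every RX BD starts Empty (R_E_S) with interrupt
         * on completion (R_I_S); every TX BD requests an interrupt (T_I_S)
         * and CRC transmission (T_TC_S). The last BD of each ring carries the
         * Wrap bit (R_W_S/T_W_S) so the QE returns to the ring base.
         */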
        for (i = 0; i < RX_BD_RING_LEN; i++) {
                if (i < (RX_BD_RING_LEN - 1))
                        bd_status = R_E_S | R_I_S;
                else
                        bd_status = R_E_S | R_I_S | R_W_S;

                iowrite16be(bd_status, &priv->rx_bd_base[i].status);
                iowrite32be(priv->dma_rx_addr + i * MAX_RX_BUF_LENGTH,
                            &priv->rx_bd_base[i].buf);
        }

        for (i = 0; i < TX_BD_RING_LEN; i++) {
                if (i < (TX_BD_RING_LEN - 1))
                        bd_status = T_I_S | T_TC_S;
                else
                        bd_status = T_I_S | T_TC_S | T_W_S;

                iowrite16be(bd_status, &priv->tx_bd_base[i].status);
                iowrite32be(priv->dma_tx_addr + i * MAX_RX_BUF_LENGTH,
                            &priv->tx_bd_base[i].buf);
        }

        return 0;

free_tiptr:
        qe_muram_free(tiptr);
free_riptr:
        qe_muram_free(riptr);
free_tx_skbuff:
        kfree(priv->tx_skbuff);
free_rx_skbuff:
        kfree(priv->rx_skbuff);
free_ucc_pram:
        qe_muram_free(priv->ucc_pram_offset);
free_tx_bd:
        dma_free_coherent(priv->dev,
                          TX_BD_RING_LEN * sizeof(struct qe_bd),
                          priv->tx_bd_base, priv->dma_tx_bd);
free_rx_bd:
        dma_free_coherent(priv->dev,
                          RX_BD_RING_LEN * sizeof(struct qe_bd),
                          priv->rx_bd_base, priv->dma_rx_bd);
free_uccf:
        ucc_fast_free(priv->uccf);

        return ret;
}

static netdev_tx_t ucc_hdlc_tx(struct sk_buff *skb, struct net_device *dev)
{
        hdlc_device *hdlc = dev_to_hdlc(dev);
        struct ucc_hdlc_private *priv = (struct ucc_hdlc_private *)hdlc->priv;
        struct qe_bd __iomem *bd;
        u16 bd_status;
        unsigned long flags;
        u16 *proto_head;

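        /* For raw HDLC the driver prepends a two-byte pseudo header
         * (DEFAULT_HDLC_HEAD); for PPP it only verifies the address/control
         * bytes already supplied by the stack. Dropped frames are freed
         * here, so NETDEV_TX_OK is returned in every case.
         */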
        switch (dev->type) {
        case ARPHRD_RAWHDLC:
                if (skb_headroom(skb) < HDLC_HEAD_LEN) {
                        dev->stats.tx_dropped++;
                        dev_kfree_skb(skb);
                        netdev_err(dev, "Not enough space for hdlc head\n");
                        return NETDEV_TX_OK;
                }

                skb_push(skb, HDLC_HEAD_LEN);

                proto_head = (u16 *)skb->data;
                *proto_head = htons(DEFAULT_HDLC_HEAD);

                dev->stats.tx_bytes += skb->len;
                break;

        case ARPHRD_PPP:
                proto_head = (u16 *)skb->data;
                if (*proto_head != htons(DEFAULT_PPP_HEAD)) {
                        dev->stats.tx_dropped++;
                        dev_kfree_skb(skb);
                        netdev_err(dev, "Wrong ppp header\n");
                        return NETDEV_TX_OK;
                }

                dev->stats.tx_bytes += skb->len;
                break;

        default:
                dev->stats.tx_dropped++;
                dev_kfree_skb(skb);
                return NETDEV_TX_OK;
        }
        spin_lock_irqsave(&priv->lock, flags);

        /* Start from the next BD that should be filled */
        bd = priv->curtx_bd;
        bd_status = ioread16be(&bd->status);
        /* Save the skb pointer so we can free it later */
        priv->tx_skbuff[priv->skb_curtx] = skb;

        /* Update the current skb pointer (wrapping if this was the last) */
        priv->skb_curtx =
            (priv->skb_curtx + 1) & TX_RING_MOD_MASK(TX_BD_RING_LEN);

        /* copy skb data to tx buffer for sdma processing */
        memcpy(priv->tx_buffer + (be32_to_cpu(bd->buf) - priv->dma_tx_addr),
               skb->data, skb->len);

        /* set bd status and length */
        bd_status = (bd_status & T_W_S) | T_R_S | T_I_S | T_L_S | T_TC_S;

        iowrite16be(skb->len, &bd->length);
        iowrite16be(bd_status, &bd->status);

        /* Move to next BD in the ring */
        if (!(bd_status & T_W_S))
                bd += 1;
        else
                bd = priv->tx_bd_base;

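        /* The ring is full once the producer catches up with the oldest
         * not-yet-confirmed BD; stop the queue until hdlc_tx_done() has
         * released a descriptor and woken it again.
         */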
        if (bd == priv->dirty_tx) {
                if (!netif_queue_stopped(dev))
                        netif_stop_queue(dev);
        }

        priv->curtx_bd = bd;

        spin_unlock_irqrestore(&priv->lock, flags);

        return NETDEV_TX_OK;
}

static int hdlc_tx_done(struct ucc_hdlc_private *priv)
{
        /* Start from the next BD that should be filled */
        struct net_device *dev = priv->ndev;
        struct qe_bd *bd;               /* BD pointer */
        u16 bd_status;

        bd = priv->dirty_tx;
        bd_status = ioread16be(&bd->status);

        /* Normal processing. */
        while ((bd_status & T_R_S) == 0) {
                struct sk_buff *skb;

                /* BD contains an already transmitted buffer; handle it and
                 * release the BD to be used with the current frame.
                 */
                skb = priv->tx_skbuff[priv->skb_dirtytx];
                if (!skb)
                        break;
                dev->stats.tx_packets++;
                memset(priv->tx_buffer +
                       (be32_to_cpu(bd->buf) - priv->dma_tx_addr),
                       0, skb->len);
                dev_kfree_skb_irq(skb);

                priv->tx_skbuff[priv->skb_dirtytx] = NULL;
                priv->skb_dirtytx =
                    (priv->skb_dirtytx +
                     1) & TX_RING_MOD_MASK(TX_BD_RING_LEN);

                /* We freed a buffer, so now we can restart transmission */
                if (netif_queue_stopped(dev))
                        netif_wake_queue(dev);

                /* Advance the confirmation BD pointer */
                if (!(bd_status & T_W_S))
                        bd += 1;
                else
                        bd = priv->tx_bd_base;
                bd_status = ioread16be(&bd->status);
        }
        priv->dirty_tx = bd;

        return 0;
}

static int hdlc_rx_done(struct ucc_hdlc_private *priv, int rx_work_limit)
{
        struct net_device *dev = priv->ndev;
        struct sk_buff *skb = NULL;
        hdlc_device *hdlc = dev_to_hdlc(dev);
        struct qe_bd *bd;
        u16 bd_status;
        u16 length, howmany = 0;
        u8 *bdbuffer;

        bd = priv->currx_bd;
        bd_status = ioread16be(&bd->status);

        /* while there are received buffers and BD is full (~R_E) */
        while (!((bd_status & (R_E_S)) || (--rx_work_limit < 0))) {
                if (bd_status & R_OV_S)
                        dev->stats.rx_over_errors++;
                if (bd_status & R_CR_S) {
                        dev->stats.rx_crc_errors++;
                        dev->stats.rx_dropped++;
                        goto recycle;
                }
                bdbuffer = priv->rx_buffer +
                        (priv->currx_bdnum * MAX_RX_BUF_LENGTH);
                length = ioread16be(&bd->length);

                switch (dev->type) {
                case ARPHRD_RAWHDLC:
                        bdbuffer += HDLC_HEAD_LEN;
                        length -= (HDLC_HEAD_LEN + HDLC_CRC_SIZE);

                        skb = dev_alloc_skb(length);
                        if (!skb) {
                                dev->stats.rx_dropped++;
                                return -ENOMEM;
                        }

                        skb_put(skb, length);
                        skb->len = length;
                        skb->dev = dev;
                        memcpy(skb->data, bdbuffer, length);
                        break;

                case ARPHRD_PPP:
                        length -= HDLC_CRC_SIZE;

                        skb = dev_alloc_skb(length);
                        if (!skb) {
                                dev->stats.rx_dropped++;
                                return -ENOMEM;
                        }

                        skb_put(skb, length);
                        skb->len = length;
                        skb->dev = dev;
                        memcpy(skb->data, bdbuffer, length);
                        break;
                }

                dev->stats.rx_packets++;
                dev->stats.rx_bytes += skb->len;
                howmany++;
                if (hdlc->proto)
                        skb->protocol = hdlc_type_trans(skb, dev);
                netif_receive_skb(skb);

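                /* Hand the BD back to the QE: setting Empty (R_E_S) again
                 * lets the controller reuse the buffer for the next frame.
                 */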
recycle:
                iowrite16be(bd_status | R_E_S | R_I_S, &bd->status);

                /* update to point at the next bd */
                if (bd_status & R_W_S) {
                        priv->currx_bdnum = 0;
                        bd = priv->rx_bd_base;
                } else {
                        if (priv->currx_bdnum < (RX_BD_RING_LEN - 1))
                                priv->currx_bdnum += 1;
                        else
                                priv->currx_bdnum = RX_BD_RING_LEN - 1;

                        bd += 1;
                }

                bd_status = ioread16be(&bd->status);
        }

        priv->currx_bd = bd;
        return howmany;
}

static int ucc_hdlc_poll(struct napi_struct *napi, int budget)
{
        struct ucc_hdlc_private *priv = container_of(napi,
                                                     struct ucc_hdlc_private,
                                                     napi);
        int howmany;

        /* Tx event processing */
        spin_lock(&priv->lock);
        hdlc_tx_done(priv);
        spin_unlock(&priv->lock);

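        /* RX is processed under the NAPI budget; if the budget is not
         * exhausted, polling is complete and the RX/TX event interrupts
         * masked off in ucc_hdlc_irq_handler() are re-enabled.
         */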
        howmany = 0;
        howmany += hdlc_rx_done(priv, budget - howmany);

        if (howmany < budget) {
                napi_complete_done(napi, howmany);
                qe_setbits32(priv->uccf->p_uccm,
                             (UCCE_HDLC_RX_EVENTS | UCCE_HDLC_TX_EVENTS) << 16);
        }

        return howmany;
}

static irqreturn_t ucc_hdlc_irq_handler(int irq, void *dev_id)
{
        struct ucc_hdlc_private *priv = (struct ucc_hdlc_private *)dev_id;
        struct net_device *dev = priv->ndev;
        struct ucc_fast_private *uccf;
        struct ucc_tdm_info *ut_info;
        u32 ucce;
        u32 uccm;

        ut_info = priv->ut_info;
        uccf = priv->uccf;

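        /* UCCE is write-one-to-clear: read the pending events, mask them with
         * the currently enabled set, and acknowledge only those bits.
         */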
        ucce = ioread32be(uccf->p_ucce);
        uccm = ioread32be(uccf->p_uccm);
        ucce &= uccm;
        iowrite32be(ucce, uccf->p_ucce);
        if (!ucce)
                return IRQ_NONE;

        if ((ucce >> 16) & (UCCE_HDLC_RX_EVENTS | UCCE_HDLC_TX_EVENTS)) {
                if (napi_schedule_prep(&priv->napi)) {
                        uccm &= ~((UCCE_HDLC_RX_EVENTS | UCCE_HDLC_TX_EVENTS)
                                  << 16);
                        iowrite32be(uccm, uccf->p_uccm);
                        __napi_schedule(&priv->napi);
                }
        }

        /* Errors and other events */
        if (ucce >> 16 & UCC_HDLC_UCCE_BSY)
                dev->stats.rx_errors++;
        if (ucce >> 16 & UCC_HDLC_UCCE_TXE)
                dev->stats.tx_errors++;

        return IRQ_HANDLED;
}

static int uhdlc_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
        const size_t size = sizeof(te1_settings);
        te1_settings line;
        struct ucc_hdlc_private *priv = netdev_priv(dev);

        if (cmd != SIOCWANDEV)
                return hdlc_ioctl(dev, ifr, cmd);

        switch (ifr->ifr_settings.type) {
        case IF_GET_IFACE:
                ifr->ifr_settings.type = IF_IFACE_E1;
                if (ifr->ifr_settings.size < size) {
                        ifr->ifr_settings.size = size; /* data size wanted */
                        return -ENOBUFS;
                }
                memset(&line, 0, sizeof(line));
                line.clock_type = priv->clocking;

                if (copy_to_user(ifr->ifr_settings.ifs_ifsu.sync, &line, size))
                        return -EFAULT;
                return 0;

        default:
                return hdlc_ioctl(dev, ifr, cmd);
        }
}

static int uhdlc_open(struct net_device *dev)
{
        u32 cecr_subblock;
        hdlc_device *hdlc = dev_to_hdlc(dev);
        struct ucc_hdlc_private *priv = hdlc->priv;
        struct ucc_tdm *utdm = priv->utdm;

        if (priv->hdlc_busy != 1) {
                if (request_irq(priv->ut_info->uf_info.irq,
                                ucc_hdlc_irq_handler, 0, "hdlc", priv))
                        return -ENODEV;

                cecr_subblock = ucc_fast_get_qe_cr_subblock(
                                        priv->ut_info->uf_info.ucc_num);

                qe_issue_cmd(QE_INIT_TX_RX, cecr_subblock,
                             QE_CR_PROTOCOL_UNSPECIFIED, 0);

                ucc_fast_enable(priv->uccf, COMM_DIR_RX | COMM_DIR_TX);

                /* Enable the TDM port */
                if (priv->tsa)
                        utdm->si_regs->siglmr1_h |= (0x1 << utdm->tdm_port);

                priv->hdlc_busy = 1;
                netif_device_attach(priv->ndev);
                napi_enable(&priv->napi);
                netif_start_queue(dev);
                hdlc_open(dev);
        }

        return 0;
}

static void uhdlc_memclean(struct ucc_hdlc_private *priv)
{
        qe_muram_free(ioread16be(&priv->ucc_pram->riptr));
        qe_muram_free(ioread16be(&priv->ucc_pram->tiptr));

        if (priv->rx_bd_base) {
                dma_free_coherent(priv->dev,
                                  RX_BD_RING_LEN * sizeof(struct qe_bd),
                                  priv->rx_bd_base, priv->dma_rx_bd);

                priv->rx_bd_base = NULL;
                priv->dma_rx_bd = 0;
        }

        if (priv->tx_bd_base) {
                dma_free_coherent(priv->dev,
                                  TX_BD_RING_LEN * sizeof(struct qe_bd),
                                  priv->tx_bd_base, priv->dma_tx_bd);

                priv->tx_bd_base = NULL;
                priv->dma_tx_bd = 0;
        }

        if (priv->ucc_pram) {
                qe_muram_free(priv->ucc_pram_offset);
                priv->ucc_pram = NULL;
                priv->ucc_pram_offset = 0;
        }

        kfree(priv->rx_skbuff);
        priv->rx_skbuff = NULL;

        kfree(priv->tx_skbuff);
        priv->tx_skbuff = NULL;

        if (priv->uf_regs) {
                iounmap(priv->uf_regs);
                priv->uf_regs = NULL;
        }

        if (priv->uccf) {
                ucc_fast_free(priv->uccf);
                priv->uccf = NULL;
        }

        if (priv->rx_buffer) {
                dma_free_coherent(priv->dev,
                                  RX_BD_RING_LEN * MAX_RX_BUF_LENGTH,
                                  priv->rx_buffer, priv->dma_rx_addr);
                priv->rx_buffer = NULL;
                priv->dma_rx_addr = 0;
        }

        if (priv->tx_buffer) {
                dma_free_coherent(priv->dev,
                                  TX_BD_RING_LEN * MAX_RX_BUF_LENGTH,
                                  priv->tx_buffer, priv->dma_tx_addr);
                priv->tx_buffer = NULL;
                priv->dma_tx_addr = 0;
        }
}

static int uhdlc_close(struct net_device *dev)
{
        struct ucc_hdlc_private *priv = dev_to_hdlc(dev)->priv;
        struct ucc_tdm *utdm = priv->utdm;
        u32 cecr_subblock;

        napi_disable(&priv->napi);
        cecr_subblock = ucc_fast_get_qe_cr_subblock(
                                priv->ut_info->uf_info.ucc_num);

        qe_issue_cmd(QE_GRACEFUL_STOP_TX, cecr_subblock,
                     (u8)QE_CR_PROTOCOL_UNSPECIFIED, 0);
        qe_issue_cmd(QE_CLOSE_RX_BD, cecr_subblock,
                     (u8)QE_CR_PROTOCOL_UNSPECIFIED, 0);

        if (priv->tsa)
                utdm->si_regs->siglmr1_h &= ~(0x1 << utdm->tdm_port);

        ucc_fast_disable(priv->uccf, COMM_DIR_RX | COMM_DIR_TX);

        free_irq(priv->ut_info->uf_info.irq, priv);
        netif_stop_queue(dev);
        priv->hdlc_busy = 0;

        return 0;
}

static int ucc_hdlc_attach(struct net_device *dev, unsigned short encoding,
                           unsigned short parity)
{
        struct ucc_hdlc_private *priv = dev_to_hdlc(dev)->priv;

        if (encoding != ENCODING_NRZ &&
            encoding != ENCODING_NRZI)
                return -EINVAL;

        if (parity != PARITY_NONE &&
            parity != PARITY_CRC32_PR1_CCITT &&
            parity != PARITY_CRC16_PR1_CCITT)
                return -EINVAL;

        priv->encoding = encoding;
        priv->parity = parity;

        return 0;
}

#ifdef CONFIG_PM
static void store_clk_config(struct ucc_hdlc_private *priv)
{
        struct qe_mux *qe_mux_reg = &qe_immr->qmx;

        /* store si clk */
        priv->cmxsi1cr_h = ioread32be(&qe_mux_reg->cmxsi1cr_h);
        priv->cmxsi1cr_l = ioread32be(&qe_mux_reg->cmxsi1cr_l);

        /* store si sync */
        priv->cmxsi1syr = ioread32be(&qe_mux_reg->cmxsi1syr);

        /* store ucc clk */
        memcpy_fromio(priv->cmxucr, qe_mux_reg->cmxucr, 4 * sizeof(u32));
}

static void resume_clk_config(struct ucc_hdlc_private *priv)
{
        struct qe_mux *qe_mux_reg = &qe_immr->qmx;

        memcpy_toio(qe_mux_reg->cmxucr, priv->cmxucr, 4 * sizeof(u32));

        iowrite32be(priv->cmxsi1cr_h, &qe_mux_reg->cmxsi1cr_h);
        iowrite32be(priv->cmxsi1cr_l, &qe_mux_reg->cmxsi1cr_l);

        iowrite32be(priv->cmxsi1syr, &qe_mux_reg->cmxsi1syr);
}

static int uhdlc_suspend(struct device *dev)
{
        struct ucc_hdlc_private *priv = dev_get_drvdata(dev);
        struct ucc_tdm_info *ut_info;
        struct ucc_fast __iomem *uf_regs;

        if (!priv)
                return -EINVAL;

        if (!netif_running(priv->ndev))
                return 0;

        netif_device_detach(priv->ndev);
        napi_disable(&priv->napi);

        ut_info = priv->ut_info;
        uf_regs = priv->uf_regs;

        /* backup gumr guemr */
        priv->gumr = ioread32be(&uf_regs->gumr);
        priv->guemr = ioread8(&uf_regs->guemr);

        priv->ucc_pram_bak = kmalloc(sizeof(*priv->ucc_pram_bak),
                                        GFP_KERNEL);
        if (!priv->ucc_pram_bak)
                return -ENOMEM;

        /* backup HDLC parameter */
        memcpy_fromio(priv->ucc_pram_bak, priv->ucc_pram,
                      sizeof(struct ucc_hdlc_param));

        /* store the clk configuration */
        store_clk_config(priv);

        /* save power */
        ucc_fast_disable(priv->uccf, COMM_DIR_RX | COMM_DIR_TX);

        return 0;
}

static int uhdlc_resume(struct device *dev)
{
        struct ucc_hdlc_private *priv = dev_get_drvdata(dev);
        struct ucc_tdm *utdm;
        struct ucc_tdm_info *ut_info;
        struct ucc_fast __iomem *uf_regs;
        struct ucc_fast_private *uccf;
        struct ucc_fast_info *uf_info;
        int ret, i;
        u32 cecr_subblock;
        u16 bd_status;

        if (!priv)
                return -EINVAL;

        if (!netif_running(priv->ndev))
                return 0;

        utdm = priv->utdm;
        ut_info = priv->ut_info;
        uf_info = &ut_info->uf_info;
        uf_regs = priv->uf_regs;
        uccf = priv->uccf;

        /* restore gumr guemr */
        iowrite8(priv->guemr, &uf_regs->guemr);
        iowrite32be(priv->gumr, &uf_regs->gumr);

        /* Set Virtual Fifo registers */
        iowrite16be(uf_info->urfs, &uf_regs->urfs);
        iowrite16be(uf_info->urfet, &uf_regs->urfet);
        iowrite16be(uf_info->urfset, &uf_regs->urfset);
        iowrite16be(uf_info->utfs, &uf_regs->utfs);
        iowrite16be(uf_info->utfet, &uf_regs->utfet);
        iowrite16be(uf_info->utftt, &uf_regs->utftt);
        /* utfb, urfb are offsets from MURAM base */
        iowrite32be(uccf->ucc_fast_tx_virtual_fifo_base_offset, &uf_regs->utfb);
        iowrite32be(uccf->ucc_fast_rx_virtual_fifo_base_offset, &uf_regs->urfb);

        /* Rx Tx and sync clock routing */
        resume_clk_config(priv);

        iowrite32be(uf_info->uccm_mask, &uf_regs->uccm);
        iowrite32be(0xffffffff, &uf_regs->ucce);

        ucc_fast_disable(priv->uccf, COMM_DIR_RX | COMM_DIR_TX);

        /* rebuild SIRAM */
        if (priv->tsa)
                ucc_tdm_init(priv->utdm, priv->ut_info);

        /* Write to QE CECR, UCCx channel to Stop Transmission */
        cecr_subblock = ucc_fast_get_qe_cr_subblock(uf_info->ucc_num);
        ret = qe_issue_cmd(QE_STOP_TX, cecr_subblock,
                           (u8)QE_CR_PROTOCOL_UNSPECIFIED, 0);

        /* Set UPSMR normal mode */
        iowrite32be(0, &uf_regs->upsmr);

        /* init parameter base */
        cecr_subblock = ucc_fast_get_qe_cr_subblock(uf_info->ucc_num);
        ret = qe_issue_cmd(QE_ASSIGN_PAGE_TO_DEVICE, cecr_subblock,
                           QE_CR_PROTOCOL_UNSPECIFIED, priv->ucc_pram_offset);

        priv->ucc_pram = (struct ucc_hdlc_param __iomem *)
                                qe_muram_addr(priv->ucc_pram_offset);

        /* restore ucc parameter */
        memcpy_toio(priv->ucc_pram, priv->ucc_pram_bak,
                    sizeof(struct ucc_hdlc_param));
        kfree(priv->ucc_pram_bak);

        /* rebuild BD entry */
        for (i = 0; i < RX_BD_RING_LEN; i++) {
                if (i < (RX_BD_RING_LEN - 1))
                        bd_status = R_E_S | R_I_S;
                else
                        bd_status = R_E_S | R_I_S | R_W_S;

                iowrite16be(bd_status, &priv->rx_bd_base[i].status);
                iowrite32be(priv->dma_rx_addr + i * MAX_RX_BUF_LENGTH,
                            &priv->rx_bd_base[i].buf);
        }

        for (i = 0; i < TX_BD_RING_LEN; i++) {
                if (i < (TX_BD_RING_LEN - 1))
                        bd_status = T_I_S | T_TC_S;
                else
                        bd_status = T_I_S | T_TC_S | T_W_S;

                iowrite16be(bd_status, &priv->tx_bd_base[i].status);
                iowrite32be(priv->dma_tx_addr + i * MAX_RX_BUF_LENGTH,
                            &priv->tx_bd_base[i].buf);
        }

        /* if hdlc is busy enable TX and RX */
        if (priv->hdlc_busy == 1) {
                cecr_subblock = ucc_fast_get_qe_cr_subblock(
                                        priv->ut_info->uf_info.ucc_num);

                qe_issue_cmd(QE_INIT_TX_RX, cecr_subblock,
                             (u8)QE_CR_PROTOCOL_UNSPECIFIED, 0);

                ucc_fast_enable(priv->uccf, COMM_DIR_RX | COMM_DIR_TX);

                /* Enable the TDM port */
                if (priv->tsa)
                        utdm->si_regs->siglmr1_h |= (0x1 << utdm->tdm_port);
        }

        napi_enable(&priv->napi);
        netif_device_attach(priv->ndev);

        return 0;
}

static const struct dev_pm_ops uhdlc_pm_ops = {
        .suspend = uhdlc_suspend,
        .resume = uhdlc_resume,
        .freeze = uhdlc_suspend,
        .thaw = uhdlc_resume,
};

#define HDLC_PM_OPS (&uhdlc_pm_ops)

#else

#define HDLC_PM_OPS NULL

#endif

static const struct net_device_ops uhdlc_ops = {
        .ndo_open       = uhdlc_open,
        .ndo_stop       = uhdlc_close,
        .ndo_start_xmit = hdlc_start_xmit,
        .ndo_do_ioctl   = uhdlc_ioctl,
};

static int ucc_hdlc_probe(struct platform_device *pdev)
{
        struct device_node *np = pdev->dev.of_node;
        struct ucc_hdlc_private *uhdlc_priv = NULL;
        struct ucc_tdm_info *ut_info;
        struct ucc_tdm *utdm = NULL;
        struct resource res;
        struct net_device *dev;
        hdlc_device *hdlc;
        int ucc_num;
        const char *sprop;
        int ret;
        u32 val;

        ret = of_property_read_u32_index(np, "cell-index", 0, &val);
        if (ret) {
                dev_err(&pdev->dev, "Invalid ucc property\n");
                return -ENODEV;
        }

        ucc_num = val - 1;
        if ((ucc_num > 3) || (ucc_num < 0)) {
                dev_err(&pdev->dev, "Invalid UCC num\n");
                return -EINVAL;
        }

        memcpy(&utdm_info[ucc_num], &utdm_primary_info,
               sizeof(utdm_primary_info));

        ut_info = &utdm_info[ucc_num];
        ut_info->uf_info.ucc_num = ucc_num;

        sprop = of_get_property(np, "rx-clock-name", NULL);
        if (sprop) {
                ut_info->uf_info.rx_clock = qe_clock_source(sprop);
                if ((ut_info->uf_info.rx_clock < QE_CLK_NONE) ||
                    (ut_info->uf_info.rx_clock > QE_CLK24)) {
                        dev_err(&pdev->dev, "Invalid rx-clock-name property\n");
                        return -EINVAL;
                }
        } else {
                dev_err(&pdev->dev, "Invalid rx-clock-name property\n");
                return -EINVAL;
        }

        sprop = of_get_property(np, "tx-clock-name", NULL);
        if (sprop) {
                ut_info->uf_info.tx_clock = qe_clock_source(sprop);
                if ((ut_info->uf_info.tx_clock < QE_CLK_NONE) ||
                    (ut_info->uf_info.tx_clock > QE_CLK24)) {
                        dev_err(&pdev->dev, "Invalid tx-clock-name property\n");
                        return -EINVAL;
                }
        } else {
                dev_err(&pdev->dev, "Invalid tx-clock-name property\n");
                return -EINVAL;
        }

        ret = of_address_to_resource(np, 0, &res);
        if (ret)
                return -EINVAL;

        ut_info->uf_info.regs = res.start;
        ut_info->uf_info.irq = irq_of_parse_and_map(np, 0);

        uhdlc_priv = kzalloc(sizeof(*uhdlc_priv), GFP_KERNEL);
        if (!uhdlc_priv)
                return -ENOMEM;

        dev_set_drvdata(&pdev->dev, uhdlc_priv);
        uhdlc_priv->dev = &pdev->dev;
        uhdlc_priv->ut_info = ut_info;

        if (of_get_property(np, "fsl,tdm-interface", NULL))
                uhdlc_priv->tsa = 1;

        if (of_get_property(np, "fsl,ucc-internal-loopback", NULL))
                uhdlc_priv->loopback = 1;

        if (of_get_property(np, "fsl,hdlc-bus", NULL))
                uhdlc_priv->hdlc_bus = 1;

        if (uhdlc_priv->tsa == 1) {
                utdm = kzalloc(sizeof(*utdm), GFP_KERNEL);
                if (!utdm) {
                        ret = -ENOMEM;
                        dev_err(&pdev->dev, "No mem to alloc ucc tdm data\n");
                        goto free_uhdlc_priv;
                }
                uhdlc_priv->utdm = utdm;
                ret = ucc_of_parse_tdm(np, utdm, ut_info);
                if (ret)
                        goto free_utdm;
        }

        ret = uhdlc_init(uhdlc_priv);
        if (ret) {
                dev_err(&pdev->dev, "Failed to init uhdlc\n");
                goto free_utdm;
        }

        dev = alloc_hdlcdev(uhdlc_priv);
        if (!dev) {
                ret = -ENOMEM;
                pr_err("ucc_hdlc: unable to allocate memory\n");
                goto undo_uhdlc_init;
        }

        uhdlc_priv->ndev = dev;
        hdlc = dev_to_hdlc(dev);
        dev->tx_queue_len = 16;
        dev->netdev_ops = &uhdlc_ops;
        hdlc->attach = ucc_hdlc_attach;
        hdlc->xmit = ucc_hdlc_tx;
        netif_napi_add(dev, &uhdlc_priv->napi, ucc_hdlc_poll, 32);
        if (register_hdlc_device(dev)) {
                ret = -ENOBUFS;
                pr_err("ucc_hdlc: unable to register hdlc device\n");
                goto free_dev;
        }

        return 0;

free_dev:
        free_netdev(dev);
undo_uhdlc_init:
free_utdm:
        if (uhdlc_priv->tsa)
                kfree(utdm);
free_uhdlc_priv:
        kfree(uhdlc_priv);
        return ret;
}

static int ucc_hdlc_remove(struct platform_device *pdev)
{
        struct ucc_hdlc_private *priv = dev_get_drvdata(&pdev->dev);

        uhdlc_memclean(priv);

        if (priv->utdm->si_regs) {
                iounmap(priv->utdm->si_regs);
                priv->utdm->si_regs = NULL;
        }

        if (priv->utdm->siram) {
                iounmap(priv->utdm->siram);
                priv->utdm->siram = NULL;
        }

        kfree(priv);

        dev_info(&pdev->dev, "UCC based hdlc module removed\n");

        return 0;
}

static const struct of_device_id fsl_ucc_hdlc_of_match[] = {
        {
        .compatible = "fsl,ucc-hdlc",
        },
        {},
};

MODULE_DEVICE_TABLE(of, fsl_ucc_hdlc_of_match);
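
/* Illustrative device tree node for this driver (a sketch, not taken from a
 * shipped .dts): the property names match the of_get_property() and
 * of_property_read_u32_index() lookups in ucc_hdlc_probe() above, and
 * "cell-index" is the 1-based UCC number.
 *
 *      ucc@2000 {
 *              compatible = "fsl,ucc-hdlc";
 *              cell-index = <1>;
 *              rx-clock-name = "brg1";
 *              tx-clock-name = "brg1";
 *              fsl,tdm-interface;
 *      };
 */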

static struct platform_driver ucc_hdlc_driver = {
        .probe  = ucc_hdlc_probe,
        .remove = ucc_hdlc_remove,
        .driver = {
                .name           = DRV_NAME,
                .pm             = HDLC_PM_OPS,
                .of_match_table = fsl_ucc_hdlc_of_match,
        },
};

module_platform_driver(ucc_hdlc_driver);
MODULE_DESCRIPTION(DRV_DESC);
MODULE_LICENSE("GPL");