1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3  * Atmel MultiMedia Card Interface driver
4  *
5  * Copyright (C) 2004-2008 Atmel Corporation
6  */
7 #include <linux/blkdev.h>
8 #include <linux/clk.h>
9 #include <linux/debugfs.h>
10 #include <linux/device.h>
11 #include <linux/dmaengine.h>
12 #include <linux/dma-mapping.h>
13 #include <linux/err.h>
14 #include <linux/init.h>
15 #include <linux/interrupt.h>
16 #include <linux/io.h>
17 #include <linux/ioport.h>
18 #include <linux/module.h>
19 #include <linux/of.h>
20 #include <linux/irq.h>
21 #include <linux/gpio/consumer.h>
22 #include <linux/platform_device.h>
23 #include <linux/scatterlist.h>
24 #include <linux/seq_file.h>
25 #include <linux/slab.h>
26 #include <linux/stat.h>
27 #include <linux/types.h>
28
29 #include <linux/mmc/host.h>
30 #include <linux/mmc/sdio.h>
31
32 #include <linux/atmel_pdc.h>
33 #include <linux/pm.h>
34 #include <linux/pm_runtime.h>
35 #include <linux/pinctrl/consumer.h>
36 #include <linux/workqueue.h>
37
38 #include <asm/cacheflush.h>
39 #include <asm/io.h>
40 #include <linux/unaligned.h>
41
42 #define ATMCI_MAX_NR_SLOTS      2
43
44 /*
45  * Superset of MCI IP registers integrated in Atmel AT91 Processor
46  * Registers and bitfields marked with [2] are only available in MCI2
47  */
48
49 /* MCI Register Definitions */
50 #define ATMCI_CR                        0x0000  /* Control */
51 #define         ATMCI_CR_MCIEN                  BIT(0)          /* MCI Enable */
52 #define         ATMCI_CR_MCIDIS                 BIT(1)          /* MCI Disable */
53 #define         ATMCI_CR_PWSEN                  BIT(2)          /* Power Save Enable */
54 #define         ATMCI_CR_PWSDIS                 BIT(3)          /* Power Save Disable */
55 #define         ATMCI_CR_SWRST                  BIT(7)          /* Software Reset */
56 #define ATMCI_MR                        0x0004  /* Mode */
57 #define         ATMCI_MR_CLKDIV(x)              ((x) <<  0)     /* Clock Divider */
58 #define         ATMCI_MR_PWSDIV(x)              ((x) <<  8)     /* Power Saving Divider */
59 #define         ATMCI_MR_RDPROOF                BIT(11)         /* Read Proof */
60 #define         ATMCI_MR_WRPROOF                BIT(12)         /* Write Proof */
61 #define         ATMCI_MR_PDCFBYTE               BIT(13)         /* Force Byte Transfer */
62 #define         ATMCI_MR_PDCPADV                BIT(14)         /* Padding Value */
63 #define         ATMCI_MR_PDCMODE                BIT(15)         /* PDC-oriented Mode */
64 #define         ATMCI_MR_CLKODD(x)              ((x) << 16)     /* LSB of Clock Divider */
65 #define ATMCI_DTOR                      0x0008  /* Data Timeout */
66 #define         ATMCI_DTOCYC(x)                 ((x) <<  0)     /* Data Timeout Cycles */
67 #define         ATMCI_DTOMUL(x)                 ((x) <<  4)     /* Data Timeout Multiplier */
68 #define ATMCI_SDCR                      0x000c  /* SD Card / SDIO */
69 #define         ATMCI_SDCSEL_SLOT_A             (0 <<  0)       /* Select SD slot A */
70 #define         ATMCI_SDCSEL_SLOT_B             (1 <<  0)       /* Select SD slot B */
71 #define         ATMCI_SDCSEL_MASK               (3 <<  0)
72 #define         ATMCI_SDCBUS_1BIT               (0 <<  6)       /* 1-bit data bus */
73 #define         ATMCI_SDCBUS_4BIT               (2 <<  6)       /* 4-bit data bus */
74 #define         ATMCI_SDCBUS_8BIT               (3 <<  6)       /* 8-bit data bus[2] */
75 #define         ATMCI_SDCBUS_MASK               (3 <<  6)
76 #define ATMCI_ARGR                      0x0010  /* Command Argument */
77 #define ATMCI_CMDR                      0x0014  /* Command */
78 #define         ATMCI_CMDR_CMDNB(x)             ((x) <<  0)     /* Command Opcode */
79 #define         ATMCI_CMDR_RSPTYP_NONE          (0 <<  6)       /* No response */
80 #define         ATMCI_CMDR_RSPTYP_48BIT         (1 <<  6)       /* 48-bit response */
81 #define         ATMCI_CMDR_RSPTYP_136BIT        (2 <<  6)       /* 136-bit response */
82 #define         ATMCI_CMDR_SPCMD_INIT           (1 <<  8)       /* Initialization command */
83 #define         ATMCI_CMDR_SPCMD_SYNC           (2 <<  8)       /* Synchronized command */
84 #define         ATMCI_CMDR_SPCMD_INT            (4 <<  8)       /* Interrupt command */
85 #define         ATMCI_CMDR_SPCMD_INTRESP        (5 <<  8)       /* Interrupt response */
86 #define         ATMCI_CMDR_OPDCMD               (1 << 11)       /* Open Drain */
87 #define         ATMCI_CMDR_MAXLAT_5CYC          (0 << 12)       /* Max latency 5 cycles */
88 #define         ATMCI_CMDR_MAXLAT_64CYC         (1 << 12)       /* Max latency 64 cycles */
89 #define         ATMCI_CMDR_START_XFER           (1 << 16)       /* Start data transfer */
90 #define         ATMCI_CMDR_STOP_XFER            (2 << 16)       /* Stop data transfer */
91 #define         ATMCI_CMDR_TRDIR_WRITE          (0 << 18)       /* Write data */
92 #define         ATMCI_CMDR_TRDIR_READ           (1 << 18)       /* Read data */
93 #define         ATMCI_CMDR_BLOCK                (0 << 19)       /* Single-block transfer */
94 #define         ATMCI_CMDR_MULTI_BLOCK          (1 << 19)       /* Multi-block transfer */
95 #define         ATMCI_CMDR_STREAM               (2 << 19)       /* MMC Stream transfer */
96 #define         ATMCI_CMDR_SDIO_BYTE            (4 << 19)       /* SDIO Byte transfer */
97 #define         ATMCI_CMDR_SDIO_BLOCK           (5 << 19)       /* SDIO Block transfer */
98 #define         ATMCI_CMDR_SDIO_SUSPEND         (1 << 24)       /* SDIO Suspend Command */
99 #define         ATMCI_CMDR_SDIO_RESUME          (2 << 24)       /* SDIO Resume Command */
100 #define ATMCI_BLKR                      0x0018  /* Block */
101 #define         ATMCI_BCNT(x)                   ((x) <<  0)     /* Data Block Count */
102 #define         ATMCI_BLKLEN(x)                 ((x) << 16)     /* Data Block Length */
103 #define ATMCI_CSTOR                     0x001c  /* Completion Signal Timeout[2] */
104 #define         ATMCI_CSTOCYC(x)                ((x) <<  0)     /* CST cycles */
105 #define         ATMCI_CSTOMUL(x)                ((x) <<  4)     /* CST multiplier */
106 #define ATMCI_RSPR                      0x0020  /* Response 0 */
107 #define ATMCI_RSPR1                     0x0024  /* Response 1 */
108 #define ATMCI_RSPR2                     0x0028  /* Response 2 */
109 #define ATMCI_RSPR3                     0x002c  /* Response 3 */
110 #define ATMCI_RDR                       0x0030  /* Receive Data */
111 #define ATMCI_TDR                       0x0034  /* Transmit Data */
112 #define ATMCI_SR                        0x0040  /* Status */
113 #define ATMCI_IER                       0x0044  /* Interrupt Enable */
114 #define ATMCI_IDR                       0x0048  /* Interrupt Disable */
115 #define ATMCI_IMR                       0x004c  /* Interrupt Mask */
116 #define         ATMCI_CMDRDY                    BIT(0)          /* Command Ready */
117 #define         ATMCI_RXRDY                     BIT(1)          /* Receiver Ready */
118 #define         ATMCI_TXRDY                     BIT(2)          /* Transmitter Ready */
119 #define         ATMCI_BLKE                      BIT(3)          /* Data Block Ended */
120 #define         ATMCI_DTIP                      BIT(4)          /* Data Transfer In Progress */
121 #define         ATMCI_NOTBUSY                   BIT(5)          /* Data Not Busy */
122 #define         ATMCI_ENDRX                     BIT(6)          /* End of RX Buffer */
123 #define         ATMCI_ENDTX                     BIT(7)          /* End of TX Buffer */
124 #define         ATMCI_SDIOIRQA                  BIT(8)          /* SDIO IRQ in slot A */
125 #define         ATMCI_SDIOIRQB                  BIT(9)          /* SDIO IRQ in slot B */
126 #define         ATMCI_SDIOWAIT                  BIT(12)         /* SDIO Read Wait Operation Status */
127 #define         ATMCI_CSRCV                     BIT(13)         /* CE-ATA Completion Signal Received */
128 #define         ATMCI_RXBUFF                    BIT(14)         /* RX Buffer Full */
129 #define         ATMCI_TXBUFE                    BIT(15)         /* TX Buffer Empty */
130 #define         ATMCI_RINDE                     BIT(16)         /* Response Index Error */
131 #define         ATMCI_RDIRE                     BIT(17)         /* Response Direction Error */
132 #define         ATMCI_RCRCE                     BIT(18)         /* Response CRC Error */
133 #define         ATMCI_RENDE                     BIT(19)         /* Response End Bit Error */
134 #define         ATMCI_RTOE                      BIT(20)         /* Response Time-Out Error */
135 #define         ATMCI_DCRCE                     BIT(21)         /* Data CRC Error */
136 #define         ATMCI_DTOE                      BIT(22)         /* Data Time-Out Error */
137 #define         ATMCI_CSTOE                     BIT(23)         /* Completion Signal Time-out Error */
138 #define         ATMCI_BLKOVRE                   BIT(24)         /* DMA Block Overrun Error */
139 #define         ATMCI_DMADONE                   BIT(25)         /* DMA Transfer Done */
140 #define         ATMCI_FIFOEMPTY                 BIT(26)         /* FIFO Empty Flag */
141 #define         ATMCI_XFRDONE                   BIT(27)         /* Transfer Done Flag */
142 #define         ATMCI_ACKRCV                    BIT(28)         /* Boot Operation Acknowledge Received */
143 #define         ATMCI_ACKRCVE                   BIT(29)         /* Boot Operation Acknowledge Error */
144 #define         ATMCI_OVRE                      BIT(30)         /* RX Overrun Error */
145 #define         ATMCI_UNRE                      BIT(31)         /* TX Underrun Error */
146 #define ATMCI_DMA                       0x0050  /* DMA Configuration[2] */
147 #define         ATMCI_DMA_OFFSET(x)             ((x) <<  0)     /* DMA Write Buffer Offset */
148 #define         ATMCI_DMA_CHKSIZE(x)            ((x) <<  4)     /* DMA Channel Read and Write Chunk Size */
149 #define         ATMCI_DMAEN                     BIT(8)  /* DMA Hardware Handshaking Enable */
150 #define ATMCI_CFG                       0x0054  /* Configuration[2] */
151 #define         ATMCI_CFG_FIFOMODE_1DATA        BIT(0)          /* MCI Internal FIFO control mode */
152 #define         ATMCI_CFG_FERRCTRL_COR          BIT(4)          /* Flow Error flag reset control mode */
153 #define         ATMCI_CFG_HSMODE                BIT(8)          /* High Speed Mode */
154 #define         ATMCI_CFG_LSYNC                 BIT(12)         /* Synchronize on the last block */
155 #define ATMCI_WPMR                      0x00e4  /* Write Protection Mode[2] */
156 #define         ATMCI_WP_EN                     BIT(0)          /* WP Enable */
157 #define         ATMCI_WP_KEY                    (0x4d4349 << 8) /* WP Key */
158 #define ATMCI_WPSR                      0x00e8  /* Write Protection Status[2] */
159 #define         ATMCI_GET_WP_VS(x)              ((x) & 0x0f)
160 #define         ATMCI_GET_WP_VSRC(x)            (((x) >> 8) & 0xffff)
161 #define ATMCI_VERSION                   0x00FC  /* Version */
162 #define ATMCI_FIFO_APERTURE             0x0200  /* FIFO Aperture[2] */
163
164 /* This does not include the FIFO Aperture on MCI2 */
165 #define ATMCI_REGS_SIZE         0x100
166
167 /* Register access macros */
168 #define atmci_readl(port, reg)                          \
169         __raw_readl((port)->regs + reg)
170 #define atmci_writel(port, reg, value)                  \
171         __raw_writel((value), (port)->regs + reg)
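/*
 * For illustration: with the accessors above, the software reset issued
 * elsewhere in this driver,
 *
 *	atmci_writel(host, ATMCI_CR, ATMCI_CR_SWRST);
 *
 * expands to __raw_writel(ATMCI_CR_SWRST, host->regs + ATMCI_CR).
 */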
172
173 #define ATMCI_CMD_TIMEOUT_MS    2000
174 #define AUTOSUSPEND_DELAY       50
175
176 #define ATMCI_DATA_ERROR_FLAGS  (ATMCI_DCRCE | ATMCI_DTOE | ATMCI_OVRE | ATMCI_UNRE)
177 #define ATMCI_DMA_THRESHOLD     16
178
179 enum {
180         EVENT_CMD_RDY = 0,
181         EVENT_XFER_COMPLETE,
182         EVENT_NOTBUSY,
183         EVENT_DATA_ERROR,
184 };
185
186 enum atmel_mci_state {
187         STATE_IDLE = 0,
188         STATE_SENDING_CMD,
189         STATE_DATA_XFER,
190         STATE_WAITING_NOTBUSY,
191         STATE_SENDING_STOP,
192         STATE_END_REQUEST,
193 };
194
195 enum atmci_xfer_dir {
196         XFER_RECEIVE = 0,
197         XFER_TRANSMIT,
198 };
199
200 enum atmci_pdc_buf {
201         PDC_FIRST_BUF = 0,
202         PDC_SECOND_BUF,
203 };
204
205 /**
206  * struct mci_slot_pdata - board-specific per-slot configuration
207  * @bus_width: Number of data lines wired up to the slot
208  * @detect_pin: GPIO pin wired to the card detect switch
209  * @wp_pin: GPIO pin wired to the write protect sensor
210  * @non_removable: The slot is not removable, only detect once
211  *
212  * If a given slot is not present on the board, @bus_width should be
213  * set to 0. The other fields are ignored in this case.
214  *
215  * Any pins that aren't available should be left as NULL.
216  *
217  * Note that support for multiple slots is experimental -- some cards
218  * might get upset if we don't get the clock management exactly right.
219  * But in most cases, it should work just fine.
220  */
221 struct mci_slot_pdata {
222         unsigned int            bus_width;
223         struct gpio_desc        *detect_pin;
224         struct gpio_desc        *wp_pin;
225         bool                    non_removable;
226 };
227
228 struct atmel_mci_caps {
229         bool    has_dma_conf_reg;
230         bool    has_pdc;
231         bool    has_cfg_reg;
232         bool    has_cstor_reg;
233         bool    has_highspeed;
234         bool    has_rwproof;
235         bool    has_odd_clk_div;
236         bool    has_bad_data_ordering;
237         bool    need_reset_after_xfer;
238         bool    need_blksz_mul_4;
239         bool    need_notbusy_for_read_ops;
240 };
241
242 struct atmel_mci_dma {
243         struct dma_chan                 *chan;
244         struct dma_async_tx_descriptor  *data_desc;
245 };
246
247 /**
248  * struct atmel_mci - MMC controller state shared between all slots
249  * @lock: Spinlock protecting the queue and associated data.
250  * @regs: Pointer to MMIO registers.
251  * @sg: Scatterlist entry currently being processed by PIO or PDC code.
252  * @sg_len: Size of the scatterlist
253  * @pio_offset: Offset into the current scatterlist entry.
254  * @buffer: Buffer used if we don't have the r/w proof capability. We
255  *      don't have the time to switch pdc buffers so we have to use only
256  *      one buffer for the full transaction.
257  * @buf_size: size of the buffer.
258  * @buf_phys_addr: buffer address needed for pdc.
259  * @cur_slot: The slot which is currently using the controller.
260  * @mrq: The request currently being processed on @cur_slot,
261  *      or NULL if the controller is idle.
262  * @cmd: The command currently being sent to the card, or NULL.
263  * @data: The data currently being transferred, or NULL if no data
264  *      transfer is in progress.
265  * @data_size: just data->blocks * data->blksz.
266  * @dma: DMA client state.
267  * @data_chan: DMA channel being used for the current data transfer.
268  * @dma_conf: Configuration for the DMA slave
269  * @cmd_status: Snapshot of SR taken upon completion of the current
270  *      command. Only valid when EVENT_CMD_RDY is pending.
271  * @data_status: Snapshot of SR taken upon completion of the current
272  *      data transfer. Only valid when EVENT_NOTBUSY or
273  *      EVENT_DATA_ERROR is pending.
274  * @stop_cmdr: Value to be loaded into CMDR when the stop command is
275  *      to be sent.
276  * @bh_work: Work running the request state machine.
277  * @pending_events: Bitmask of events flagged by the interrupt handler
278  *      to be processed by the work.
279  * @completed_events: Bitmask of events which the state machine has
280  *      processed.
281  * @state: Work state.
282  * @queue: List of slots waiting for access to the controller.
283  * @need_clock_update: Update the clock rate before the next request.
284  * @need_reset: Reset controller before next request.
285  * @timer: Software timeout timer used when the data timeout error flag cannot rise.
286  * @mode_reg: Value of the MR register.
287  * @cfg_reg: Value of the CFG register.
288  * @bus_hz: The rate of @mck in Hz. This forms the basis for MMC bus
289  *      rate and timeout calculations.
290  * @mapbase: Physical address of the MMIO registers.
291  * @mck: The peripheral bus clock hooked up to the MMC controller.
292  * @dev: Device associated with the MMC controller.
293  * @pdata: Per-slot configuration data.
294  * @slot: Slots sharing this MMC controller.
295  * @caps: MCI capabilities depending on MCI version.
296  * @prepare_data: function to setup MCI before data transfer which
297  * depends on MCI capabilities.
298  * @submit_data: function to start data transfer which depends on MCI
299  * capabilities.
300  * @stop_transfer: function to stop data transfer which depends on MCI
301  * capabilities.
302  *
303  * Locking
304  * =======
305  *
306  * @lock is a softirq-safe spinlock protecting @queue as well as
307  * @cur_slot, @mrq and @state. These must always be updated
308  * at the same time while holding @lock.
309  *
310  * @lock also protects mode_reg and need_clock_update since these are
311  * used to synchronize mode register updates with the queue
312  * processing.
313  *
314  * The @mrq field of struct atmel_mci_slot is also protected by @lock,
315  * and must always be written at the same time as the slot is added to
316  * @queue.
317  *
318  * @pending_events and @completed_events are accessed using atomic bit
319  * operations, so they don't need any locking.
320  *
321  * None of the fields touched by the interrupt handler need any
322  * locking. However, ordering is important: Before EVENT_DATA_ERROR or
323  * EVENT_NOTBUSY is set in @pending_events, all data-related
324  * interrupts must be disabled and @data_status updated with a
325  * snapshot of SR. Similarly, before EVENT_CMD_RDY is set, the
326  * CMDRDY interrupt must be disabled and @cmd_status updated with a
327  * snapshot of SR, and before EVENT_XFER_COMPLETE can be set, the
328  * bytes_xfered field of @data must be written. This is ensured by
329  * using barriers.
330  */
331 struct atmel_mci {
332         spinlock_t              lock;
333         void __iomem            *regs;
334
335         struct scatterlist      *sg;
336         unsigned int            sg_len;
337         unsigned int            pio_offset;
338         unsigned int            *buffer;
339         unsigned int            buf_size;
340         dma_addr_t              buf_phys_addr;
341
342         struct atmel_mci_slot   *cur_slot;
343         struct mmc_request      *mrq;
344         struct mmc_command      *cmd;
345         struct mmc_data         *data;
346         unsigned int            data_size;
347
348         struct atmel_mci_dma    dma;
349         struct dma_chan         *data_chan;
350         struct dma_slave_config dma_conf;
351
352         u32                     cmd_status;
353         u32                     data_status;
354         u32                     stop_cmdr;
355
356         struct work_struct      bh_work;
357         unsigned long           pending_events;
358         unsigned long           completed_events;
359         enum atmel_mci_state    state;
360         struct list_head        queue;
361
362         bool                    need_clock_update;
363         bool                    need_reset;
364         struct timer_list       timer;
365         u32                     mode_reg;
366         u32                     cfg_reg;
367         unsigned long           bus_hz;
368         unsigned long           mapbase;
369         struct clk              *mck;
370         struct device           *dev;
371
372         struct mci_slot_pdata   pdata[ATMCI_MAX_NR_SLOTS];
373         struct atmel_mci_slot   *slot[ATMCI_MAX_NR_SLOTS];
374
375         struct atmel_mci_caps   caps;
376
377         u32 (*prepare_data)(struct atmel_mci *host, struct mmc_data *data);
378         void (*submit_data)(struct atmel_mci *host, struct mmc_data *data);
379         void (*stop_transfer)(struct atmel_mci *host);
380 };
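/*
 * For illustration (a sketch of the pattern implied by the locking rules
 * above, not verbatim handler code): before flagging a data error, the
 * interrupt handler is expected to publish the status snapshot first,
 * roughly:
 *
 *	host->data_status = status;
 *	smp_wmb();
 *	atmci_set_pending(host, EVENT_DATA_ERROR);
 *
 * so the bh work never observes the event without the matching status.
 */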
381
382 /**
383  * struct atmel_mci_slot - MMC slot state
384  * @mmc: The mmc_host representing this slot.
385  * @host: The MMC controller this slot is using.
386  * @sdc_reg: Value of SDCR to be written before using this slot.
387  * @sdio_irq: SDIO irq mask for this slot.
388  * @mrq: mmc_request currently being processed or waiting to be
389  *      processed, or NULL when the slot is idle.
390  * @queue_node: List node for placing this node in the @queue list of
391  *      &struct atmel_mci.
392  * @clock: Clock rate configured by set_ios(). Protected by host->lock.
393  * @flags: Random state bits associated with the slot.
394  * @detect_pin: GPIO descriptor used for card detection, or NULL if not
395  *      available.
396  * @wp_pin: GPIO descriptor used for card write protect sensing, or NULL
397  *      if not available.
398  * @detect_timer: Timer used for debouncing @detect_pin interrupts.
399  */
400 struct atmel_mci_slot {
401         struct mmc_host         *mmc;
402         struct atmel_mci        *host;
403
404         u32                     sdc_reg;
405         u32                     sdio_irq;
406
407         struct mmc_request      *mrq;
408         struct list_head        queue_node;
409
410         unsigned int            clock;
411         unsigned long           flags;
412 #define ATMCI_CARD_PRESENT      0
413 #define ATMCI_CARD_NEED_INIT    1
414 #define ATMCI_SHUTDOWN          2
415
416         struct gpio_desc        *detect_pin;
417         struct gpio_desc        *wp_pin;
418
419         struct timer_list       detect_timer;
420 };
421
422 #define atmci_test_and_clear_pending(host, event)               \
423         test_and_clear_bit(event, &host->pending_events)
424 #define atmci_set_completed(host, event)                        \
425         set_bit(event, &host->completed_events)
426 #define atmci_set_pending(host, event)                          \
427         set_bit(event, &host->pending_events)
428
429 /*
430  * The debugfs stuff below is mostly optimized away when
431  * CONFIG_DEBUG_FS is not set.
432  */
433 static int atmci_req_show(struct seq_file *s, void *v)
434 {
435         struct atmel_mci_slot   *slot = s->private;
436         struct mmc_request      *mrq;
437         struct mmc_command      *cmd;
438         struct mmc_command      *stop;
439         struct mmc_data         *data;
440
441         /* Make sure we get a consistent snapshot */
442         spin_lock_bh(&slot->host->lock);
443         mrq = slot->mrq;
444
445         if (mrq) {
446                 cmd = mrq->cmd;
447                 data = mrq->data;
448                 stop = mrq->stop;
449
450                 if (cmd)
451                         seq_printf(s,
452                                 "CMD%u(0x%x) flg %x rsp %x %x %x %x err %d\n",
453                                 cmd->opcode, cmd->arg, cmd->flags,
454                                 cmd->resp[0], cmd->resp[1], cmd->resp[2],
455                                 cmd->resp[3], cmd->error);
456                 if (data)
457                         seq_printf(s, "DATA %u / %u * %u flg %x err %d\n",
458                                 data->bytes_xfered, data->blocks,
459                                 data->blksz, data->flags, data->error);
460                 if (stop)
461                         seq_printf(s,
462                                 "CMD%u(0x%x) flg %x rsp %x %x %x %x err %d\n",
463                                 stop->opcode, stop->arg, stop->flags,
464                                 stop->resp[0], stop->resp[1], stop->resp[2],
465                                 stop->resp[3], stop->error);
466         }
467
468         spin_unlock_bh(&slot->host->lock);
469
470         return 0;
471 }
472
473 DEFINE_SHOW_ATTRIBUTE(atmci_req);
474
475 static void atmci_show_status_reg(struct seq_file *s,
476                 const char *regname, u32 value)
477 {
478         static const char       *sr_bit[] = {
479                 [0]     = "CMDRDY",
480                 [1]     = "RXRDY",
481                 [2]     = "TXRDY",
482                 [3]     = "BLKE",
483                 [4]     = "DTIP",
484                 [5]     = "NOTBUSY",
485                 [6]     = "ENDRX",
486                 [7]     = "ENDTX",
487                 [8]     = "SDIOIRQA",
488                 [9]     = "SDIOIRQB",
489                 [12]    = "SDIOWAIT",
490                 [14]    = "RXBUFF",
491                 [15]    = "TXBUFE",
492                 [16]    = "RINDE",
493                 [17]    = "RDIRE",
494                 [18]    = "RCRCE",
495                 [19]    = "RENDE",
496                 [20]    = "RTOE",
497                 [21]    = "DCRCE",
498                 [22]    = "DTOE",
499                 [23]    = "CSTOE",
500                 [24]    = "BLKOVRE",
501                 [25]    = "DMADONE",
502                 [26]    = "FIFOEMPTY",
503                 [27]    = "XFRDONE",
504                 [30]    = "OVRE",
505                 [31]    = "UNRE",
506         };
507         unsigned int            i;
508
509         seq_printf(s, "%s:\t0x%08x", regname, value);
510         for (i = 0; i < ARRAY_SIZE(sr_bit); i++) {
511                 if (value & (1 << i)) {
512                         if (sr_bit[i])
513                                 seq_printf(s, " %s", sr_bit[i]);
514                         else
515                                 seq_puts(s, " UNKNOWN");
516                 }
517         }
518         seq_putc(s, '\n');
519 }
520
521 static int atmci_regs_show(struct seq_file *s, void *v)
522 {
523         struct atmel_mci        *host = s->private;
524         struct device           *dev = host->dev;
525         u32                     *buf;
526         int                     ret = 0;
527
528
529         buf = kmalloc(ATMCI_REGS_SIZE, GFP_KERNEL);
530         if (!buf)
531                 return -ENOMEM;
532
533         pm_runtime_get_sync(dev);
534
535         /*
536          * Grab a more or less consistent snapshot. Note that we're
537          * not disabling interrupts, so IMR and SR may not be
538          * consistent.
539          */
540         spin_lock_bh(&host->lock);
541         memcpy_fromio(buf, host->regs, ATMCI_REGS_SIZE);
542         spin_unlock_bh(&host->lock);
543
544         pm_runtime_mark_last_busy(dev);
545         pm_runtime_put_autosuspend(dev);
546
547         seq_printf(s, "MR:\t0x%08x%s%s ",
548                         buf[ATMCI_MR / 4],
549                         buf[ATMCI_MR / 4] & ATMCI_MR_RDPROOF ? " RDPROOF" : "",
550                         buf[ATMCI_MR / 4] & ATMCI_MR_WRPROOF ? " WRPROOF" : "");
551         if (host->caps.has_odd_clk_div)
552                 seq_printf(s, "{CLKDIV,CLKODD}=%u\n",
553                                 ((buf[ATMCI_MR / 4] & 0xff) << 1)
554                                 | ((buf[ATMCI_MR / 4] >> 16) & 1));
555         else
556                 seq_printf(s, "CLKDIV=%u\n",
557                                 (buf[ATMCI_MR / 4] & 0xff));
558         seq_printf(s, "DTOR:\t0x%08x\n", buf[ATMCI_DTOR / 4]);
559         seq_printf(s, "SDCR:\t0x%08x\n", buf[ATMCI_SDCR / 4]);
560         seq_printf(s, "ARGR:\t0x%08x\n", buf[ATMCI_ARGR / 4]);
561         seq_printf(s, "BLKR:\t0x%08x BCNT=%u BLKLEN=%u\n",
562                         buf[ATMCI_BLKR / 4],
563                         buf[ATMCI_BLKR / 4] & 0xffff,
564                         (buf[ATMCI_BLKR / 4] >> 16) & 0xffff);
565         if (host->caps.has_cstor_reg)
566                 seq_printf(s, "CSTOR:\t0x%08x\n", buf[ATMCI_CSTOR / 4]);
567
568         /* Don't read RSPR and RDR; it will consume the data there */
569
570         atmci_show_status_reg(s, "SR", buf[ATMCI_SR / 4]);
571         atmci_show_status_reg(s, "IMR", buf[ATMCI_IMR / 4]);
572
573         if (host->caps.has_dma_conf_reg) {
574                 u32 val;
575
576                 val = buf[ATMCI_DMA / 4];
577                 seq_printf(s, "DMA:\t0x%08x OFFSET=%u CHKSIZE=%u%s\n",
578                                 val, val & 3,
579                                 ((val >> 4) & 3) ?
580                                         1 << (((val >> 4) & 3) + 1) : 1,
581                                 val & ATMCI_DMAEN ? " DMAEN" : "");
582         }
583         if (host->caps.has_cfg_reg) {
584                 u32 val;
585
586                 val = buf[ATMCI_CFG / 4];
587                 seq_printf(s, "CFG:\t0x%08x%s%s%s%s\n",
588                                 val,
589                                 val & ATMCI_CFG_FIFOMODE_1DATA ? " FIFOMODE_ONE_DATA" : "",
590                                 val & ATMCI_CFG_FERRCTRL_COR ? " FERRCTRL_CLEAR_ON_READ" : "",
591                                 val & ATMCI_CFG_HSMODE ? " HSMODE" : "",
592                                 val & ATMCI_CFG_LSYNC ? " LSYNC" : "");
593         }
594
595         kfree(buf);
596
597         return ret;
598 }
599
600 DEFINE_SHOW_ATTRIBUTE(atmci_regs);
601
602 static void atmci_init_debugfs(struct atmel_mci_slot *slot)
603 {
604         struct mmc_host         *mmc = slot->mmc;
605         struct atmel_mci        *host = slot->host;
606         struct dentry           *root;
607
608         root = mmc->debugfs_root;
609         if (!root)
610                 return;
611
612         debugfs_create_file("regs", S_IRUSR, root, host, &atmci_regs_fops);
613         debugfs_create_file("req", S_IRUSR, root, slot, &atmci_req_fops);
614         debugfs_create_u32("state", S_IRUSR, root, &host->state);
615         debugfs_create_xul("pending_events", S_IRUSR, root,
616                            &host->pending_events);
617         debugfs_create_xul("completed_events", S_IRUSR, root,
618                            &host->completed_events);
619 }
620
621 static const struct of_device_id atmci_dt_ids[] = {
622         { .compatible = "atmel,hsmci" },
623         { /* sentinel */ }
624 };
625
626 MODULE_DEVICE_TABLE(of, atmci_dt_ids);
627
628 static int atmci_of_init(struct atmel_mci *host)
629 {
630         struct device *dev = host->dev;
631         struct device_node *np = dev->of_node;
632         struct device_node *cnp;
633         u32 slot_id;
634         int err;
635
636         if (!np)
637                 return dev_err_probe(dev, -EINVAL, "device node not found\n");
638
639         for_each_child_of_node(np, cnp) {
640                 if (of_property_read_u32(cnp, "reg", &slot_id)) {
641                         dev_warn(dev, "reg property is missing for %pOF\n", cnp);
642                         continue;
643                 }
644
645                 if (slot_id >= ATMCI_MAX_NR_SLOTS) {
646                         dev_warn(dev, "can't have more than %d slots\n",
647                                  ATMCI_MAX_NR_SLOTS);
648                         of_node_put(cnp);
649                         break;
650                 }
651
652                 if (of_property_read_u32(cnp, "bus-width",
653                                          &host->pdata[slot_id].bus_width))
654                         host->pdata[slot_id].bus_width = 1;
655
656                 host->pdata[slot_id].detect_pin =
657                         devm_fwnode_gpiod_get(dev, of_fwnode_handle(cnp),
658                                               "cd", GPIOD_IN, "cd-gpios");
659                 err = PTR_ERR_OR_ZERO(host->pdata[slot_id].detect_pin);
660                 if (err) {
661                         if (err != -ENOENT) {
662                                 of_node_put(cnp);
663                                 return err;
664                         }
665                         host->pdata[slot_id].detect_pin = NULL;
666                 }
667
668                 host->pdata[slot_id].non_removable =
669                         of_property_read_bool(cnp, "non-removable");
670
671                 host->pdata[slot_id].wp_pin =
672                         devm_fwnode_gpiod_get(dev, of_fwnode_handle(cnp),
673                                               "wp", GPIOD_IN, "wp-gpios");
674                 err = PTR_ERR_OR_ZERO(host->pdata[slot_id].wp_pin);
675                 if (err) {
676                         if (err != -ENOENT) {
677                                 of_node_put(cnp);
678                                 return err;
679                         }
680                         host->pdata[slot_id].wp_pin = NULL;
681                 }
682         }
683
684         return 0;
685 }
686
687 static inline unsigned int atmci_get_version(struct atmel_mci *host)
688 {
689         return atmci_readl(host, ATMCI_VERSION) & 0x00000fff;
690 }
691
692 /*
693  * Fix sconfig's burst size according to the Atmel MCI. We need to convert them as:
694  * 1 -> 0, 4 -> 1, 8 -> 2, 16 -> 3.
695  * With version 0x600, we need to convert them as: 1 -> 0, 2 -> 1, 4 -> 2,
696  * 8 -> 3, 16 -> 4.
697  *
698  * This can be done by finding the most significant bit set.
699  */
700 static inline unsigned int atmci_convert_chksize(struct atmel_mci *host,
701                                                  unsigned int maxburst)
702 {
703         unsigned int version = atmci_get_version(host);
704         unsigned int offset = 2;
705
706         if (version >= 0x600)
707                 offset = 1;
708
709         if (maxburst > 1)
710                 return fls(maxburst) - offset;
711         else
712                 return 0;
713 }
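/*
 * Worked example (values are illustrative): with maxburst = 8 on an IP
 * version below 0x600, fls(8) - 2 = 2, matching the 8 -> 2 mapping above;
 * on version 0x600 and later the same maxburst gives fls(8) - 1 = 3.
 * A maxburst of 1 (or 0) always maps to chunk size 0.
 */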
714
715 static void atmci_timeout_timer(struct timer_list *t)
716 {
717         struct atmel_mci *host = from_timer(host, t, timer);
718         struct device *dev = host->dev;
719
720         dev_dbg(dev, "software timeout\n");
721
722         if (host->mrq->cmd->data) {
723                 host->mrq->cmd->data->error = -ETIMEDOUT;
724                 host->data = NULL;
725                 /*
726                  * With some SDIO modules, sometimes DMA transfer hangs. If
727                  * stop_transfer() is not called then the DMA request is not
728                  * removed, and the following ones are queued but never processed.
729                  */
730                 if (host->state == STATE_DATA_XFER)
731                         host->stop_transfer(host);
732         } else {
733                 host->mrq->cmd->error = -ETIMEDOUT;
734                 host->cmd = NULL;
735         }
736         host->need_reset = true;
737         host->state = STATE_END_REQUEST;
738         smp_wmb();
739         queue_work(system_bh_wq, &host->bh_work);
740 }
741
742 static inline unsigned int atmci_ns_to_clocks(struct atmel_mci *host,
743                                         unsigned int ns)
744 {
745         /*
746          * It is easier to use us instead of ns for the timeout here;
747          * it prevents overflows during the calculation.
748          */
749         unsigned int us = DIV_ROUND_UP(ns, 1000);
750
751         /* Maximum clock frequency is host->bus_hz/2 */
752         return us * (DIV_ROUND_UP(host->bus_hz, 2000000));
753 }
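/*
 * Worked example (numbers are illustrative): with bus_hz = 132 MHz and a
 * card timeout_ns of 100 ms, us = DIV_ROUND_UP(100000000, 1000) = 100000,
 * DIV_ROUND_UP(132000000, 2000000) = 66, and the function returns
 * 100000 * 66 = 6600000 timeout clocks.
 */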
754
755 static void atmci_set_timeout(struct atmel_mci *host,
756                 struct atmel_mci_slot *slot, struct mmc_data *data)
757 {
758         static unsigned dtomul_to_shift[] = {
759                 0, 4, 7, 8, 10, 12, 16, 20
760         };
761         unsigned        timeout;
762         unsigned        dtocyc;
763         unsigned        dtomul;
764
765         timeout = atmci_ns_to_clocks(host, data->timeout_ns)
766                 + data->timeout_clks;
767
768         for (dtomul = 0; dtomul < 8; dtomul++) {
769                 unsigned shift = dtomul_to_shift[dtomul];
770                 dtocyc = (timeout + (1 << shift) - 1) >> shift;
771                 if (dtocyc < 15)
772                         break;
773         }
774
775         if (dtomul >= 8) {
776                 dtomul = 7;
777                 dtocyc = 15;
778         }
779
780         dev_vdbg(&slot->mmc->class_dev, "setting timeout to %u cycles\n",
781                         dtocyc << dtomul_to_shift[dtomul]);
782         atmci_writel(host, ATMCI_DTOR, (ATMCI_DTOMUL(dtomul) | ATMCI_DTOCYC(dtocyc)));
783 }
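/*
 * Worked example (continuing the illustrative 6600000-clock timeout): the
 * loop above settles on dtomul = 7 (shift 20), since
 * (6600000 + (1 << 20) - 1) >> 20 = 7 is the first rounded-up count below
 * 15, so DTOR is programmed with DTOMUL = 7 and DTOCYC = 7, an effective
 * timeout of 7 << 20 = 7340032 cycles.
 */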
784
785 /*
786  * Return mask with command flags to be enabled for this command.
787  */
788 static u32 atmci_prepare_command(struct mmc_host *mmc,
789                                  struct mmc_command *cmd)
790 {
791         struct mmc_data *data;
792         u32             cmdr;
793
794         cmd->error = -EINPROGRESS;
795
796         cmdr = ATMCI_CMDR_CMDNB(cmd->opcode);
797
798         if (cmd->flags & MMC_RSP_PRESENT) {
799                 if (cmd->flags & MMC_RSP_136)
800                         cmdr |= ATMCI_CMDR_RSPTYP_136BIT;
801                 else
802                         cmdr |= ATMCI_CMDR_RSPTYP_48BIT;
803         }
804
805         /*
806          * This should really be MAXLAT_5 for CMD2 and ACMD41, but
807          * it's too difficult to determine whether this is an ACMD or
808          * not. Better make it 64.
809          */
810         cmdr |= ATMCI_CMDR_MAXLAT_64CYC;
811
812         if (mmc->ios.bus_mode == MMC_BUSMODE_OPENDRAIN)
813                 cmdr |= ATMCI_CMDR_OPDCMD;
814
815         data = cmd->data;
816         if (data) {
817                 cmdr |= ATMCI_CMDR_START_XFER;
818
819                 if (cmd->opcode == SD_IO_RW_EXTENDED) {
820                         cmdr |= ATMCI_CMDR_SDIO_BLOCK;
821                 } else {
822                         if (data->blocks > 1)
823                                 cmdr |= ATMCI_CMDR_MULTI_BLOCK;
824                         else
825                                 cmdr |= ATMCI_CMDR_BLOCK;
826                 }
827
828                 if (data->flags & MMC_DATA_READ)
829                         cmdr |= ATMCI_CMDR_TRDIR_READ;
830         }
831
832         return cmdr;
833 }
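/*
 * For illustration: a CMD18 (READ_MULTIPLE_BLOCK) with a short response in
 * push-pull mode ends up with
 * ATMCI_CMDR_CMDNB(18) | ATMCI_CMDR_RSPTYP_48BIT | ATMCI_CMDR_MAXLAT_64CYC |
 * ATMCI_CMDR_START_XFER | ATMCI_CMDR_MULTI_BLOCK | ATMCI_CMDR_TRDIR_READ.
 */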
834
835 static void atmci_send_command(struct atmel_mci *host,
836                 struct mmc_command *cmd, u32 cmd_flags)
837 {
838         struct device *dev = host->dev;
839         unsigned int timeout_ms = cmd->busy_timeout ? cmd->busy_timeout :
840                 ATMCI_CMD_TIMEOUT_MS;
841
842         WARN_ON(host->cmd);
843         host->cmd = cmd;
844
845         dev_vdbg(dev, "start command: ARGR=0x%08x CMDR=0x%08x\n", cmd->arg, cmd_flags);
846
847         atmci_writel(host, ATMCI_ARGR, cmd->arg);
848         atmci_writel(host, ATMCI_CMDR, cmd_flags);
849
850         mod_timer(&host->timer, jiffies + msecs_to_jiffies(timeout_ms));
851 }
852
853 static void atmci_send_stop_cmd(struct atmel_mci *host, struct mmc_data *data)
854 {
855         struct device *dev = host->dev;
856
857         dev_dbg(dev, "send stop command\n");
858         atmci_send_command(host, data->stop, host->stop_cmdr);
859         atmci_writel(host, ATMCI_IER, ATMCI_CMDRDY);
860 }
861
862 /*
863  * Configure given PDC buffer taking care of alignment issues.
864  * Update host->data_size and host->sg.
865  */
866 static void atmci_pdc_set_single_buf(struct atmel_mci *host,
867         enum atmci_xfer_dir dir, enum atmci_pdc_buf buf_nb)
868 {
869         u32 pointer_reg, counter_reg;
870         unsigned int buf_size;
871
872         if (dir == XFER_RECEIVE) {
873                 pointer_reg = ATMEL_PDC_RPR;
874                 counter_reg = ATMEL_PDC_RCR;
875         } else {
876                 pointer_reg = ATMEL_PDC_TPR;
877                 counter_reg = ATMEL_PDC_TCR;
878         }
879
880         if (buf_nb == PDC_SECOND_BUF) {
881                 pointer_reg += ATMEL_PDC_SCND_BUF_OFF;
882                 counter_reg += ATMEL_PDC_SCND_BUF_OFF;
883         }
884
885         if (!host->caps.has_rwproof) {
886                 buf_size = host->buf_size;
887                 atmci_writel(host, pointer_reg, host->buf_phys_addr);
888         } else {
889                 buf_size = sg_dma_len(host->sg);
890                 atmci_writel(host, pointer_reg, sg_dma_address(host->sg));
891         }
892
893         if (host->data_size <= buf_size) {
894                 if (host->data_size & 0x3) {
895                         /* If the size is not a multiple of 4, transfer bytes */
896                         atmci_writel(host, counter_reg, host->data_size);
897                         atmci_writel(host, ATMCI_MR, host->mode_reg | ATMCI_MR_PDCFBYTE);
898                 } else {
899                         /* Else transfer 32-bit words */
900                         atmci_writel(host, counter_reg, host->data_size / 4);
901                 }
902                 host->data_size = 0;
903         } else {
904                 /* We assume the size of a page is 32-bit aligned */
905                 atmci_writel(host, counter_reg, sg_dma_len(host->sg) / 4);
906                 host->data_size -= sg_dma_len(host->sg);
907                 if (host->data_size)
908                         host->sg = sg_next(host->sg);
909         }
910 }
911
912 /*
913  * Configure the PDC buffers according to the data size, i.e. configuring one or two
914  * buffers. Don't use this function if you want to configure only the second
915  * buffer. In this case, use atmci_pdc_set_single_buf.
916  */
917 static void atmci_pdc_set_both_buf(struct atmel_mci *host, int dir)
918 {
919         atmci_pdc_set_single_buf(host, dir, PDC_FIRST_BUF);
920         if (host->data_size)
921                 atmci_pdc_set_single_buf(host, dir, PDC_SECOND_BUF);
922 }
923
924 /*
925  * Unmap sg lists, called when transfer is finished.
926  */
927 static void atmci_pdc_cleanup(struct atmel_mci *host)
928 {
929         struct mmc_data         *data = host->data;
930         struct device           *dev = host->dev;
931
932         if (data)
933                 dma_unmap_sg(dev, data->sg, data->sg_len, mmc_get_dma_dir(data));
934 }
935
936 /*
937  * Disable PDC transfers. Update pending flags to EVENT_XFER_COMPLETE after
938  * having received ATMCI_TXBUFE or ATMCI_RXBUFF interrupt. Enable ATMCI_NOTBUSY
939  * interrupt needed for both transfer directions.
940  */
941 static void atmci_pdc_complete(struct atmel_mci *host)
942 {
943         struct device *dev = host->dev;
944         int transfer_size = host->data->blocks * host->data->blksz;
945         int i;
946
947         atmci_writel(host, ATMEL_PDC_PTCR, ATMEL_PDC_RXTDIS | ATMEL_PDC_TXTDIS);
948
949         if ((!host->caps.has_rwproof)
950             && (host->data->flags & MMC_DATA_READ)) {
951                 if (host->caps.has_bad_data_ordering)
952                         for (i = 0; i < transfer_size; i++)
953                                 host->buffer[i] = swab32(host->buffer[i]);
954                 sg_copy_from_buffer(host->data->sg, host->data->sg_len,
955                                     host->buffer, transfer_size);
956         }
957
958         atmci_pdc_cleanup(host);
959
960         dev_dbg(dev, "(%s) set pending xfer complete\n", __func__);
961         atmci_set_pending(host, EVENT_XFER_COMPLETE);
962         queue_work(system_bh_wq, &host->bh_work);
963 }
964
965 static void atmci_dma_cleanup(struct atmel_mci *host)
966 {
967         struct mmc_data                 *data = host->data;
968
969         if (data)
970                 dma_unmap_sg(host->dma.chan->device->dev,
971                                 data->sg, data->sg_len,
972                                 mmc_get_dma_dir(data));
973 }
974
975 /*
976  * This function is called by the DMA driver from bh context.
977  */
978 static void atmci_dma_complete(void *arg)
979 {
980         struct atmel_mci        *host = arg;
981         struct mmc_data         *data = host->data;
982         struct device           *dev = host->dev;
983
984         dev_vdbg(dev, "DMA complete\n");
985
986         if (host->caps.has_dma_conf_reg)
987                 /* Disable DMA hardware handshaking on MCI */
988                 atmci_writel(host, ATMCI_DMA, atmci_readl(host, ATMCI_DMA) & ~ATMCI_DMAEN);
989
990         atmci_dma_cleanup(host);
991
992         /*
993          * If the card was removed, data will be NULL. No point trying
994          * to send the stop command or waiting for NOTBUSY in this case.
995          */
996         if (data) {
997                 dev_dbg(dev, "(%s) set pending xfer complete\n", __func__);
998                 atmci_set_pending(host, EVENT_XFER_COMPLETE);
999                 queue_work(system_bh_wq, &host->bh_work);
1000
1001                 /*
1002                  * Regardless of what the documentation says, we have
1003                  * to wait for NOTBUSY even after block read
1004                  * operations.
1005                  *
1006                  * When the DMA transfer is complete, the controller
1007                  * may still be reading the CRC from the card, i.e.
1008                  * the data transfer is still in progress and we
1009                  * haven't seen all the potential error bits yet.
1010                  *
1011                  * The interrupt handler will schedule a different
1012                  * bh work to finish things up when the data transfer
1013                  * is completely done.
1014                  *
1015                  * We may not complete the mmc request here anyway
1016                  * because the mmc layer may call back and cause us to
1017                  * violate the "don't submit new operations from the
1018                  * completion callback" rule of the dma engine
1019                  * framework.
1020                  */
1021                 atmci_writel(host, ATMCI_IER, ATMCI_NOTBUSY);
1022         }
1023 }
1024
1025 /*
1026  * Returns a mask of interrupt flags to be enabled after the whole
1027  * request has been prepared.
1028  */
1029 static u32 atmci_prepare_data(struct atmel_mci *host, struct mmc_data *data)
1030 {
1031         u32 iflags;
1032
1033         data->error = -EINPROGRESS;
1034
1035         host->sg = data->sg;
1036         host->sg_len = data->sg_len;
1037         host->data = data;
1038         host->data_chan = NULL;
1039
1040         iflags = ATMCI_DATA_ERROR_FLAGS;
1041
1042         /*
1043          * Errata: MMC data write operation with less than 12
1044          * bytes is impossible.
1045          *
1046          * Errata: MCI Transmit Data Register (TDR) FIFO
1047          * corruption when length is not multiple of 4.
1048          */
1049         if (data->blocks * data->blksz < 12
1050                         || (data->blocks * data->blksz) & 3)
1051                 host->need_reset = true;
1052
1053         host->pio_offset = 0;
1054         if (data->flags & MMC_DATA_READ)
1055                 iflags |= ATMCI_RXRDY;
1056         else
1057                 iflags |= ATMCI_TXRDY;
1058
1059         return iflags;
1060 }
1061
1062 /*
1063  * Set interrupt flags and set block length into the MCI mode register even
1064  * though this value is also accessible in the MCI block register. It seems
1065  * to be necessary before the High Speed MCI version. It also maps the sg
1066  * list and configures the PDC registers.
1067  */
1068 static u32
1069 atmci_prepare_data_pdc(struct atmel_mci *host, struct mmc_data *data)
1070 {
1071         struct device *dev = host->dev;
1072         u32 iflags, tmp;
1073         int i;
1074
1075         data->error = -EINPROGRESS;
1076
1077         host->data = data;
1078         host->sg = data->sg;
1079         iflags = ATMCI_DATA_ERROR_FLAGS;
1080
1081         /* Enable pdc mode */
1082         atmci_writel(host, ATMCI_MR, host->mode_reg | ATMCI_MR_PDCMODE);
1083
1084         if (data->flags & MMC_DATA_READ)
1085                 iflags |= ATMCI_ENDRX | ATMCI_RXBUFF;
1086         else
1087                 iflags |= ATMCI_ENDTX | ATMCI_TXBUFE | ATMCI_BLKE;
1088
1089         /* Set BLKLEN */
1090         tmp = atmci_readl(host, ATMCI_MR);
1091         tmp &= 0x0000ffff;
1092         tmp |= ATMCI_BLKLEN(data->blksz);
1093         atmci_writel(host, ATMCI_MR, tmp);
1094
1095         /* Configure PDC */
1096         host->data_size = data->blocks * data->blksz;
1097         dma_map_sg(dev, data->sg, data->sg_len, mmc_get_dma_dir(data));
1098
1099         if ((!host->caps.has_rwproof)
1100             && (host->data->flags & MMC_DATA_WRITE)) {
1101                 sg_copy_to_buffer(host->data->sg, host->data->sg_len,
1102                                   host->buffer, host->data_size);
1103                 if (host->caps.has_bad_data_ordering)
1104                         for (i = 0; i < host->data_size; i++)
1105                                 host->buffer[i] = swab32(host->buffer[i]);
1106         }
1107
1108         if (host->data_size)
1109                 atmci_pdc_set_both_buf(host, data->flags & MMC_DATA_READ ?
1110                                        XFER_RECEIVE : XFER_TRANSMIT);
1111         return iflags;
1112 }
1113
1114 static u32
1115 atmci_prepare_data_dma(struct atmel_mci *host, struct mmc_data *data)
1116 {
1117         struct dma_chan                 *chan;
1118         struct dma_async_tx_descriptor  *desc;
1119         struct scatterlist              *sg;
1120         unsigned int                    i;
1121         enum dma_transfer_direction     slave_dirn;
1122         unsigned int                    sglen;
1123         u32                             maxburst;
1124         u32 iflags;
1125
1126         data->error = -EINPROGRESS;
1127
1128         WARN_ON(host->data);
1129         host->sg = NULL;
1130         host->data = data;
1131
1132         iflags = ATMCI_DATA_ERROR_FLAGS;
1133
1134         /*
1135          * We don't do DMA on "complex" transfers, i.e. with
1136          * non-word-aligned buffers or lengths. Also, we don't bother
1137          * with all the DMA setup overhead for short transfers.
1138          */
1139         if (data->blocks * data->blksz < ATMCI_DMA_THRESHOLD)
1140                 return atmci_prepare_data(host, data);
1141         if (data->blksz & 3)
1142                 return atmci_prepare_data(host, data);
1143
1144         for_each_sg(data->sg, sg, data->sg_len, i) {
1145                 if (sg->offset & 3 || sg->length & 3)
1146                         return atmci_prepare_data(host, data);
1147         }
1148
1149         /* If we don't have a channel, we can't do DMA */
1150         if (!host->dma.chan)
1151                 return -ENODEV;
1152
1153         chan = host->dma.chan;
1154         host->data_chan = chan;
1155
1156         if (data->flags & MMC_DATA_READ) {
1157                 host->dma_conf.direction = slave_dirn = DMA_DEV_TO_MEM;
1158                 maxburst = atmci_convert_chksize(host,
1159                                                  host->dma_conf.src_maxburst);
1160         } else {
1161                 host->dma_conf.direction = slave_dirn = DMA_MEM_TO_DEV;
1162                 maxburst = atmci_convert_chksize(host,
1163                                                  host->dma_conf.dst_maxburst);
1164         }
1165
1166         if (host->caps.has_dma_conf_reg)
1167                 atmci_writel(host, ATMCI_DMA, ATMCI_DMA_CHKSIZE(maxburst) |
1168                         ATMCI_DMAEN);
1169
1170         sglen = dma_map_sg(chan->device->dev, data->sg,
1171                         data->sg_len, mmc_get_dma_dir(data));
1172
1173         dmaengine_slave_config(chan, &host->dma_conf);
1174         desc = dmaengine_prep_slave_sg(chan,
1175                         data->sg, sglen, slave_dirn,
1176                         DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
1177         if (!desc)
1178                 goto unmap_exit;
1179
1180         host->dma.data_desc = desc;
1181         desc->callback = atmci_dma_complete;
1182         desc->callback_param = host;
1183
1184         return iflags;
1185 unmap_exit:
1186         dma_unmap_sg(chan->device->dev, data->sg, data->sg_len,
1187                      mmc_get_dma_dir(data));
1188         return -ENOMEM;
1189 }
1190
1191 static void
1192 atmci_submit_data(struct atmel_mci *host, struct mmc_data *data)
1193 {
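	/*
	 * Nothing to submit here: in PIO mode the transfer is driven entirely
	 * from the RXRDY/TXRDY interrupt handling.
	 */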
1194         return;
1195 }
1196
1197 /*
1198  * Start PDC according to transfer direction.
1199  */
1200 static void
1201 atmci_submit_data_pdc(struct atmel_mci *host, struct mmc_data *data)
1202 {
1203         if (data->flags & MMC_DATA_READ)
1204                 atmci_writel(host, ATMEL_PDC_PTCR, ATMEL_PDC_RXTEN);
1205         else
1206                 atmci_writel(host, ATMEL_PDC_PTCR, ATMEL_PDC_TXTEN);
1207 }
1208
1209 static void
1210 atmci_submit_data_dma(struct atmel_mci *host, struct mmc_data *data)
1211 {
1212         struct dma_chan                 *chan = host->data_chan;
1213         struct dma_async_tx_descriptor  *desc = host->dma.data_desc;
1214
1215         if (chan) {
1216                 dmaengine_submit(desc);
1217                 dma_async_issue_pending(chan);
1218         }
1219 }
1220
1221 static void atmci_stop_transfer(struct atmel_mci *host)
1222 {
1223         struct device *dev = host->dev;
1224
1225         dev_dbg(dev, "(%s) set pending xfer complete\n", __func__);
1226         atmci_set_pending(host, EVENT_XFER_COMPLETE);
1227         atmci_writel(host, ATMCI_IER, ATMCI_NOTBUSY);
1228 }
1229
1230 /*
1231  * Stop data transfer because error(s) occurred.
1232  */
1233 static void atmci_stop_transfer_pdc(struct atmel_mci *host)
1234 {
1235         atmci_writel(host, ATMEL_PDC_PTCR, ATMEL_PDC_RXTDIS | ATMEL_PDC_TXTDIS);
1236 }
1237
1238 static void atmci_stop_transfer_dma(struct atmel_mci *host)
1239 {
1240         struct dma_chan *chan = host->data_chan;
1241         struct device *dev = host->dev;
1242
1243         if (chan) {
1244                 dmaengine_terminate_all(chan);
1245                 atmci_dma_cleanup(host);
1246         } else {
1247                 /* Data transfer was stopped by the interrupt handler */
1248                 dev_dbg(dev, "(%s) set pending xfer complete\n", __func__);
1249                 atmci_set_pending(host, EVENT_XFER_COMPLETE);
1250                 atmci_writel(host, ATMCI_IER, ATMCI_NOTBUSY);
1251         }
1252 }
1253
1254 /*
1255  * Start a request: prepare data if needed, prepare the command and activate
1256  * interrupts.
1257  */
1258 static void atmci_start_request(struct atmel_mci *host,
1259                 struct atmel_mci_slot *slot)
1260 {
1261         struct device           *dev = host->dev;
1262         struct mmc_request      *mrq;
1263         struct mmc_command      *cmd;
1264         struct mmc_data         *data;
1265         u32                     iflags;
1266         u32                     cmdflags;
1267
1268         mrq = slot->mrq;
1269         host->cur_slot = slot;
1270         host->mrq = mrq;
1271
1272         host->pending_events = 0;
1273         host->completed_events = 0;
1274         host->cmd_status = 0;
1275         host->data_status = 0;
1276
1277         dev_dbg(dev, "start request: cmd %u\n", mrq->cmd->opcode);
1278
1279         if (host->need_reset || host->caps.need_reset_after_xfer) {
1280                 iflags = atmci_readl(host, ATMCI_IMR);
1281                 iflags &= (ATMCI_SDIOIRQA | ATMCI_SDIOIRQB);
1282                 atmci_writel(host, ATMCI_CR, ATMCI_CR_SWRST);
1283                 atmci_writel(host, ATMCI_CR, ATMCI_CR_MCIEN);
1284                 atmci_writel(host, ATMCI_MR, host->mode_reg);
1285                 if (host->caps.has_cfg_reg)
1286                         atmci_writel(host, ATMCI_CFG, host->cfg_reg);
1287                 atmci_writel(host, ATMCI_IER, iflags);
1288                 host->need_reset = false;
1289         }
1290         atmci_writel(host, ATMCI_SDCR, slot->sdc_reg);
1291
1292         iflags = atmci_readl(host, ATMCI_IMR);
1293         if (iflags & ~(ATMCI_SDIOIRQA | ATMCI_SDIOIRQB))
1294                 dev_dbg(&slot->mmc->class_dev, "WARNING: IMR=0x%08x\n",
1295                                 iflags);
1296
1297         if (unlikely(test_and_clear_bit(ATMCI_CARD_NEED_INIT, &slot->flags))) {
1298                 /* Send init sequence (74 clock cycles) */
1299                 atmci_writel(host, ATMCI_CMDR, ATMCI_CMDR_SPCMD_INIT);
1300                 while (!(atmci_readl(host, ATMCI_SR) & ATMCI_CMDRDY))
1301                         cpu_relax();
1302         }
1303         iflags = 0;
1304         data = mrq->data;
1305         if (data) {
1306                 atmci_set_timeout(host, slot, data);
1307
1308                 /* Must set block count/size before sending command */
1309                 atmci_writel(host, ATMCI_BLKR, ATMCI_BCNT(data->blocks)
1310                                 | ATMCI_BLKLEN(data->blksz));
1311                 dev_vdbg(&slot->mmc->class_dev, "BLKR=0x%08x\n",
1312                         ATMCI_BCNT(data->blocks) | ATMCI_BLKLEN(data->blksz));
1313
1314                 iflags |= host->prepare_data(host, data);
1315         }
1316
1317         iflags |= ATMCI_CMDRDY;
1318         cmd = mrq->cmd;
1319         cmdflags = atmci_prepare_command(slot->mmc, cmd);
1320
1321         /*
1322          * DMA transfer should be started before sending the command to avoid
1323          * unexpected errors especially for read operations in SDIO mode.
1324          * Unfortunately, in PDC mode, command has to be sent before starting
1325          * the transfer.
1326          */
1327         if (host->submit_data != &atmci_submit_data_dma)
1328                 atmci_send_command(host, cmd, cmdflags);
1329
1330         if (data)
1331                 host->submit_data(host, data);
1332
1333         if (host->submit_data == &atmci_submit_data_dma)
1334                 atmci_send_command(host, cmd, cmdflags);
1335
1336         if (mrq->stop) {
1337                 host->stop_cmdr = atmci_prepare_command(slot->mmc, mrq->stop);
1338                 host->stop_cmdr |= ATMCI_CMDR_STOP_XFER;
1339                 if (!(data->flags & MMC_DATA_WRITE))
1340                         host->stop_cmdr |= ATMCI_CMDR_TRDIR_READ;
1341                 host->stop_cmdr |= ATMCI_CMDR_MULTI_BLOCK;
1342         }
1343
1344         /*
1345          * We could have enabled interrupts earlier, but I suspect
1346          * that would open up a nice can of interesting race
1347          * conditions (e.g. command and data complete, but stop not
1348          * prepared yet.)
1349          */
1350         atmci_writel(host, ATMCI_IER, iflags);
1351 }
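
/*
 * Illustrative sketch (not used by the driver) of how the ATMCI_BLKR value
 * written in atmci_start_request() above is packed, assuming ATMCI_BCNT()
 * places the block count in the low half-word and ATMCI_BLKLEN() the block
 * size in the upper one, as the register definitions earlier in this file
 * suggest.  For example, 8 blocks of 512 bytes would give 0x02000008.
 */
static inline u32 atmci_example_blkr(u32 blocks, u32 blksz)
{
        return ATMCI_BCNT(blocks) | ATMCI_BLKLEN(blksz);
}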
1352
1353 static void atmci_queue_request(struct atmel_mci *host,
1354                 struct atmel_mci_slot *slot, struct mmc_request *mrq)
1355 {
1356         struct device *dev = host->dev;
1357
1358         dev_vdbg(&slot->mmc->class_dev, "queue request: state=%d\n",
1359                         host->state);
1360
1361         spin_lock_bh(&host->lock);
1362         slot->mrq = mrq;
1363         if (host->state == STATE_IDLE) {
1364                 host->state = STATE_SENDING_CMD;
1365                 atmci_start_request(host, slot);
1366         } else {
1367                 dev_dbg(dev, "queue request\n");
1368                 list_add_tail(&slot->queue_node, &host->queue);
1369         }
1370         spin_unlock_bh(&host->lock);
1371 }
1372
1373 static void atmci_request(struct mmc_host *mmc, struct mmc_request *mrq)
1374 {
1375         struct atmel_mci_slot   *slot = mmc_priv(mmc);
1376         struct atmel_mci        *host = slot->host;
1377         struct device           *dev = host->dev;
1378         struct mmc_data         *data;
1379
1380         WARN_ON(slot->mrq);
1381         dev_dbg(dev, "MRQ: cmd %u\n", mrq->cmd->opcode);
1382
1383         /*
1384          * We may "know" the card is gone even though there's still an
1385          * electrical connection. If so, we really need to communicate
1386          * this to the MMC core since there won't be any more
1387          * interrupts as the card is completely removed. Otherwise,
1388          * the MMC core might believe the card is still there even
1389          * though the card was just removed very slowly.
1390          */
1391         if (!test_bit(ATMCI_CARD_PRESENT, &slot->flags)) {
1392                 mrq->cmd->error = -ENOMEDIUM;
1393                 mmc_request_done(mmc, mrq);
1394                 return;
1395         }
1396
1397         /* Multi-block transfers require a block size that is a multiple of 4. */
1398         data = mrq->data;
1399         if (data && data->blocks > 1 && data->blksz & 3) {
1400                 mrq->cmd->error = -EINVAL;
1401                 mmc_request_done(mmc, mrq);
                     return;
1402         }
1403
1404         atmci_queue_request(host, slot, mrq);
1405 }
1406
1407 static void atmci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
1408 {
1409         struct atmel_mci_slot   *slot = mmc_priv(mmc);
1410         struct atmel_mci        *host = slot->host;
1411         unsigned int            i;
1412
1413         slot->sdc_reg &= ~ATMCI_SDCBUS_MASK;
1414         switch (ios->bus_width) {
1415         case MMC_BUS_WIDTH_1:
1416                 slot->sdc_reg |= ATMCI_SDCBUS_1BIT;
1417                 break;
1418         case MMC_BUS_WIDTH_4:
1419                 slot->sdc_reg |= ATMCI_SDCBUS_4BIT;
1420                 break;
1421         case MMC_BUS_WIDTH_8:
1422                 slot->sdc_reg |= ATMCI_SDCBUS_8BIT;
1423                 break;
1424         }
1425
1426         if (ios->clock) {
1427                 unsigned int clock_min = ~0U;
1428                 int clkdiv;
1429
1430                 spin_lock_bh(&host->lock);
1431                 if (!host->mode_reg) {
1432                         atmci_writel(host, ATMCI_CR, ATMCI_CR_SWRST);
1433                         atmci_writel(host, ATMCI_CR, ATMCI_CR_MCIEN);
1434                         if (host->caps.has_cfg_reg)
1435                                 atmci_writel(host, ATMCI_CFG, host->cfg_reg);
1436                 }
1437
1438                 /*
1439                  * Use mirror of ios->clock to prevent race with mmc
1440                  * core ios update when finding the minimum.
1441                  */
1442                 slot->clock = ios->clock;
1443                 for (i = 0; i < ATMCI_MAX_NR_SLOTS; i++) {
1444                         if (host->slot[i] && host->slot[i]->clock
1445                                         && host->slot[i]->clock < clock_min)
1446                                 clock_min = host->slot[i]->clock;
1447                 }
1448
1449                 /* Calculate clock divider */
1450                 if (host->caps.has_odd_clk_div) {
1451                         clkdiv = DIV_ROUND_UP(host->bus_hz, clock_min) - 2;
1452                         if (clkdiv < 0) {
1453                                 dev_warn(&mmc->class_dev,
1454                                          "clock %u too fast; using %lu\n",
1455                                          clock_min, host->bus_hz / 2);
1456                                 clkdiv = 0;
1457                         } else if (clkdiv > 511) {
1458                                 dev_warn(&mmc->class_dev,
1459                                          "clock %u too slow; using %lu\n",
1460                                          clock_min, host->bus_hz / (511 + 2));
1461                                 clkdiv = 511;
1462                         }
1463                         host->mode_reg = ATMCI_MR_CLKDIV(clkdiv >> 1)
1464                                          | ATMCI_MR_CLKODD(clkdiv & 1);
1465                 } else {
1466                         clkdiv = DIV_ROUND_UP(host->bus_hz, 2 * clock_min) - 1;
1467                         if (clkdiv > 255) {
1468                                 dev_warn(&mmc->class_dev,
1469                                          "clock %u too slow; using %lu\n",
1470                                          clock_min, host->bus_hz / (2 * 256));
1471                                 clkdiv = 255;
1472                         }
1473                         host->mode_reg = ATMCI_MR_CLKDIV(clkdiv);
1474                 }
1475
1476                 /*
1477                  * WRPROOF and RDPROOF prevent overruns/underruns by
1478                  * stopping the clock when the FIFO is full/empty.
1479                  * This state is not expected to last for long.
1480                  */
1481                 if (host->caps.has_rwproof)
1482                         host->mode_reg |= (ATMCI_MR_WRPROOF | ATMCI_MR_RDPROOF);
1483
1484                 if (host->caps.has_cfg_reg) {
1485                         /* Set up High Speed mode according to the card's capabilities */
1486                         if (ios->timing == MMC_TIMING_SD_HS)
1487                                 host->cfg_reg |= ATMCI_CFG_HSMODE;
1488                         else
1489                                 host->cfg_reg &= ~ATMCI_CFG_HSMODE;
1490                 }
1491
1492                 if (list_empty(&host->queue)) {
1493                         atmci_writel(host, ATMCI_MR, host->mode_reg);
1494                         if (host->caps.has_cfg_reg)
1495                                 atmci_writel(host, ATMCI_CFG, host->cfg_reg);
1496                 } else {
1497                         host->need_clock_update = true;
1498                 }
1499
1500                 spin_unlock_bh(&host->lock);
1501         } else {
1502                 bool any_slot_active = false;
1503
1504                 spin_lock_bh(&host->lock);
1505                 slot->clock = 0;
1506                 for (i = 0; i < ATMCI_MAX_NR_SLOTS; i++) {
1507                         if (host->slot[i] && host->slot[i]->clock) {
1508                                 any_slot_active = true;
1509                                 break;
1510                         }
1511                 }
1512                 if (!any_slot_active) {
1513                         atmci_writel(host, ATMCI_CR, ATMCI_CR_MCIDIS);
1514                         if (host->mode_reg) {
1515                                 atmci_readl(host, ATMCI_MR);
1516                         }
1517                         host->mode_reg = 0;
1518                 }
1519                 spin_unlock_bh(&host->lock);
1520         }
1521
1522         switch (ios->power_mode) {
1523         case MMC_POWER_OFF:
1524                 if (!IS_ERR(mmc->supply.vmmc))
1525                         mmc_regulator_set_ocr(mmc, mmc->supply.vmmc, 0);
1526                 break;
1527         case MMC_POWER_UP:
1528                 set_bit(ATMCI_CARD_NEED_INIT, &slot->flags);
1529                 if (!IS_ERR(mmc->supply.vmmc))
1530                         mmc_regulator_set_ocr(mmc, mmc->supply.vmmc, ios->vdd);
1531                 break;
1532         default:
1533                 break;
1534         }
1535 }
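
/*
 * Illustrative sketch (not called anywhere) of the divider maths used by
 * atmci_set_ios() above: with the odd-divider capability the MCI clock runs
 * at bus_hz / (clkdiv + 2), otherwise at bus_hz / (2 * (clkdiv + 1)).  For
 * example, bus_hz = 133 MHz and a 25 MHz request give clkdiv = 4 in the
 * odd-divider case, i.e. an actual rate of about 22.2 MHz.
 */
static inline unsigned long atmci_example_actual_rate(unsigned long bus_hz,
                unsigned int clock_min, bool has_odd_clk_div)
{
        long clkdiv;

        if (has_odd_clk_div) {
                /* clkdiv is split into ATMCI_MR_CLKDIV and ATMCI_MR_CLKODD */
                clkdiv = DIV_ROUND_UP(bus_hz, clock_min) - 2;
                clkdiv = clamp_t(long, clkdiv, 0, 511);
                return bus_hz / (clkdiv + 2);
        }

        /* Older IP revisions only have the 8-bit ATMCI_MR_CLKDIV field */
        clkdiv = DIV_ROUND_UP(bus_hz, 2 * clock_min) - 1;
        clkdiv = clamp_t(long, clkdiv, 0, 255);
        return bus_hz / (2 * (clkdiv + 1));
}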
1536
1537 static int atmci_get_ro(struct mmc_host *mmc)
1538 {
1539         int                     read_only = -ENOSYS;
1540         struct atmel_mci_slot   *slot = mmc_priv(mmc);
1541
1542         if (slot->wp_pin) {
1543                 read_only = gpiod_get_value(slot->wp_pin);
1544                 dev_dbg(&mmc->class_dev, "card is %s\n",
1545                                 read_only ? "read-only" : "read-write");
1546         }
1547
1548         return read_only;
1549 }
1550
1551 static int atmci_get_cd(struct mmc_host *mmc)
1552 {
1553         int                     present = -ENOSYS;
1554         struct atmel_mci_slot   *slot = mmc_priv(mmc);
1555
1556         if (slot->detect_pin) {
1557                 present = gpiod_get_value_cansleep(slot->detect_pin);
1558                 dev_dbg(&mmc->class_dev, "card is %spresent\n",
1559                                 present ? "" : "not ");
1560         }
1561
1562         return present;
1563 }
1564
1565 static void atmci_enable_sdio_irq(struct mmc_host *mmc, int enable)
1566 {
1567         struct atmel_mci_slot   *slot = mmc_priv(mmc);
1568         struct atmel_mci        *host = slot->host;
1569
1570         if (enable)
1571                 atmci_writel(host, ATMCI_IER, slot->sdio_irq);
1572         else
1573                 atmci_writel(host, ATMCI_IDR, slot->sdio_irq);
1574 }
1575
1576 static const struct mmc_host_ops atmci_ops = {
1577         .request        = atmci_request,
1578         .set_ios        = atmci_set_ios,
1579         .get_ro         = atmci_get_ro,
1580         .get_cd         = atmci_get_cd,
1581         .enable_sdio_irq = atmci_enable_sdio_irq,
1582 };
1583
1584 /* Called with host->lock held */
1585 static void atmci_request_end(struct atmel_mci *host, struct mmc_request *mrq)
1586         __releases(&host->lock)
1587         __acquires(&host->lock)
1588 {
1589         struct atmel_mci_slot   *slot = NULL;
1590         struct mmc_host         *prev_mmc = host->cur_slot->mmc;
1591         struct device           *dev = host->dev;
1592
1593         WARN_ON(host->cmd || host->data);
1594
1595         del_timer(&host->timer);
1596
1597         /*
1598          * Update the MMC clock rate if necessary. This may be needed
1599          * when set_ios() is called while a different slot is busy
1600          * transferring data.
1601          */
1602         if (host->need_clock_update) {
1603                 atmci_writel(host, ATMCI_MR, host->mode_reg);
1604                 if (host->caps.has_cfg_reg)
1605                         atmci_writel(host, ATMCI_CFG, host->cfg_reg);
1606         }
1607
1608         host->cur_slot->mrq = NULL;
1609         host->mrq = NULL;
1610         if (!list_empty(&host->queue)) {
1611                 slot = list_entry(host->queue.next,
1612                                 struct atmel_mci_slot, queue_node);
1613                 list_del(&slot->queue_node);
1614                 dev_vdbg(dev, "list not empty: %s is next\n", mmc_hostname(slot->mmc));
1615                 host->state = STATE_SENDING_CMD;
1616                 atmci_start_request(host, slot);
1617         } else {
1618                 dev_vdbg(dev, "list empty\n");
1619                 host->state = STATE_IDLE;
1620         }
1621
1622         spin_unlock(&host->lock);
1623         mmc_request_done(prev_mmc, mrq);
1624         spin_lock(&host->lock);
1625 }
1626
1627 static void atmci_command_complete(struct atmel_mci *host,
1628                         struct mmc_command *cmd)
1629 {
1630         u32             status = host->cmd_status;
1631
1632         /* Read the response from the card (up to 16 bytes) */
1633         cmd->resp[0] = atmci_readl(host, ATMCI_RSPR);
1634         cmd->resp[1] = atmci_readl(host, ATMCI_RSPR);
1635         cmd->resp[2] = atmci_readl(host, ATMCI_RSPR);
1636         cmd->resp[3] = atmci_readl(host, ATMCI_RSPR);
1637
1638         if (status & ATMCI_RTOE)
1639                 cmd->error = -ETIMEDOUT;
1640         else if ((cmd->flags & MMC_RSP_CRC) && (status & ATMCI_RCRCE))
1641                 cmd->error = -EILSEQ;
1642         else if (status & (ATMCI_RINDE | ATMCI_RDIRE | ATMCI_RENDE))
1643                 cmd->error = -EIO;
1644         else if (host->mrq->data && (host->mrq->data->blksz & 3)) {
1645                 if (host->caps.need_blksz_mul_4) {
1646                         cmd->error = -EINVAL;
1647                         host->need_reset = 1;
1648                 }
1649         } else
1650                 cmd->error = 0;
1651 }
1652
1653 static void atmci_detect_change(struct timer_list *t)
1654 {
1655         struct atmel_mci_slot   *slot = from_timer(slot, t, detect_timer);
1656         bool                    present;
1657         bool                    present_old;
1658
1659         /*
1660          * atmci_cleanup_slot() sets the ATMCI_SHUTDOWN flag before
1661          * freeing the interrupt. We must not re-enable the interrupt
1662          * if it has been freed, and if we're shutting down, it
1663          * doesn't really matter whether the card is present or not.
1664          */
1665         smp_rmb();
1666         if (test_bit(ATMCI_SHUTDOWN, &slot->flags))
1667                 return;
1668
1669         enable_irq(gpiod_to_irq(slot->detect_pin));
1670         present = gpiod_get_value_cansleep(slot->detect_pin);
1671         present_old = test_bit(ATMCI_CARD_PRESENT, &slot->flags);
1672
1673         dev_vdbg(&slot->mmc->class_dev, "detect change: %d (was %d)\n",
1674                         present, present_old);
1675
1676         if (present != present_old) {
1677                 struct atmel_mci        *host = slot->host;
1678                 struct mmc_request      *mrq;
1679
1680                 dev_dbg(&slot->mmc->class_dev, "card %s\n",
1681                         present ? "inserted" : "removed");
1682
1683                 spin_lock(&host->lock);
1684
1685                 if (!present)
1686                         clear_bit(ATMCI_CARD_PRESENT, &slot->flags);
1687                 else
1688                         set_bit(ATMCI_CARD_PRESENT, &slot->flags);
1689
1690                 /* Clean up queue if present */
1691                 mrq = slot->mrq;
1692                 if (mrq) {
1693                         if (mrq == host->mrq) {
1694                                 /*
1695                                  * Reset controller to terminate any ongoing
1696                                  * commands or data transfers.
1697                                  */
1698                                 atmci_writel(host, ATMCI_CR, ATMCI_CR_SWRST);
1699                                 atmci_writel(host, ATMCI_CR, ATMCI_CR_MCIEN);
1700                                 atmci_writel(host, ATMCI_MR, host->mode_reg);
1701                                 if (host->caps.has_cfg_reg)
1702                                         atmci_writel(host, ATMCI_CFG, host->cfg_reg);
1703
1704                                 host->data = NULL;
1705                                 host->cmd = NULL;
1706
1707                                 switch (host->state) {
1708                                 case STATE_IDLE:
1709                                         break;
1710                                 case STATE_SENDING_CMD:
1711                                         mrq->cmd->error = -ENOMEDIUM;
1712                                         if (mrq->data)
1713                                                 host->stop_transfer(host);
1714                                         break;
1715                                 case STATE_DATA_XFER:
1716                                         mrq->data->error = -ENOMEDIUM;
1717                                         host->stop_transfer(host);
1718                                         break;
1719                                 case STATE_WAITING_NOTBUSY:
1720                                         mrq->data->error = -ENOMEDIUM;
1721                                         break;
1722                                 case STATE_SENDING_STOP:
1723                                         mrq->stop->error = -ENOMEDIUM;
1724                                         break;
1725                                 case STATE_END_REQUEST:
1726                                         break;
1727                                 }
1728
1729                                 atmci_request_end(host, mrq);
1730                         } else {
1731                                 list_del(&slot->queue_node);
1732                                 mrq->cmd->error = -ENOMEDIUM;
1733                                 if (mrq->data)
1734                                         mrq->data->error = -ENOMEDIUM;
1735                                 if (mrq->stop)
1736                                         mrq->stop->error = -ENOMEDIUM;
1737
1738                                 spin_unlock(&host->lock);
1739                                 mmc_request_done(slot->mmc, mrq);
1740                                 spin_lock(&host->lock);
1741                         }
1742                 }
1743                 spin_unlock(&host->lock);
1744
1745                 mmc_detect_change(slot->mmc, 0);
1746         }
1747 }
1748
1749 static void atmci_work_func(struct work_struct *t)
1750 {
1751         struct atmel_mci        *host = from_work(host, t, bh_work);
1752         struct mmc_request      *mrq = host->mrq;
1753         struct mmc_data         *data = host->data;
1754         struct device           *dev = host->dev;
1755         enum atmel_mci_state    state = host->state;
1756         enum atmel_mci_state    prev_state;
1757         u32                     status;
1758
1759         spin_lock(&host->lock);
1760
1761         state = host->state;
1762
1763         dev_vdbg(dev, "bh_work: state %u pending/completed/mask %lx/%lx/%x\n",
1764                 state, host->pending_events, host->completed_events,
1765                 atmci_readl(host, ATMCI_IMR));
1766
1767         do {
1768                 prev_state = state;
1769                 dev_dbg(dev, "FSM: state=%d\n", state);
1770
1771                 switch (state) {
1772                 case STATE_IDLE:
1773                         break;
1774
1775                 case STATE_SENDING_CMD:
1776                         /*
1777                          * The command has been sent and we are waiting for it
1778                          * to become ready. Three next states are possible:
1779                          * END_REQUEST by default, WAITING_NOTBUSY if the
1780                          * command requires it, or DATA_XFER if there is data.
1781                          */
1782                         dev_dbg(dev, "FSM: cmd ready?\n");
1783                         if (!atmci_test_and_clear_pending(host,
1784                                                 EVENT_CMD_RDY))
1785                                 break;
1786
1787                         dev_dbg(dev, "set completed cmd ready\n");
1788                         host->cmd = NULL;
1789                         atmci_set_completed(host, EVENT_CMD_RDY);
1790                         atmci_command_complete(host, mrq->cmd);
1791                         if (mrq->data) {
1792                                 dev_dbg(dev, "command with data transfer\n");
1793                                 /*
1794                                  * If there is a command error don't start
1795                                  * data transfer.
1796                                  */
1797                                 if (mrq->cmd->error) {
1798                                         host->stop_transfer(host);
1799                                         host->data = NULL;
1800                                         atmci_writel(host, ATMCI_IDR,
1801                                                      ATMCI_TXRDY | ATMCI_RXRDY
1802                                                      | ATMCI_DATA_ERROR_FLAGS);
1803                                         state = STATE_END_REQUEST;
1804                                 } else
1805                                         state = STATE_DATA_XFER;
1806                         } else if ((!mrq->data) && (mrq->cmd->flags & MMC_RSP_BUSY)) {
1807                                 dev_dbg(dev, "command response need waiting notbusy\n");
1808                                 atmci_writel(host, ATMCI_IER, ATMCI_NOTBUSY);
1809                                 state = STATE_WAITING_NOTBUSY;
1810                         } else
1811                                 state = STATE_END_REQUEST;
1812
1813                         break;
1814
1815                 case STATE_DATA_XFER:
1816                         if (atmci_test_and_clear_pending(host,
1817                                                 EVENT_DATA_ERROR)) {
1818                                 dev_dbg(dev, "set completed data error\n");
1819                                 atmci_set_completed(host, EVENT_DATA_ERROR);
1820                                 state = STATE_END_REQUEST;
1821                                 break;
1822                         }
1823
1824                         /*
1825                          * A data transfer is in progress. The event expected
1826                          * to move to the next state depends on the data
1827                          * transfer type (PDC or DMA). Once the transfer is
1828                          * done, we move on to WAITING_NOTBUSY for writes, or
1829                          * directly to SENDING_STOP for reads.
1830                          */
1831                         dev_dbg(dev, "FSM: xfer complete?\n");
1832                         if (!atmci_test_and_clear_pending(host,
1833                                                 EVENT_XFER_COMPLETE))
1834                                 break;
1835
1836                         dev_dbg(dev, "(%s) set completed xfer complete\n", __func__);
1837                         atmci_set_completed(host, EVENT_XFER_COMPLETE);
1838
1839                         if (host->caps.need_notbusy_for_read_ops ||
1840                            (host->data->flags & MMC_DATA_WRITE)) {
1841                                 atmci_writel(host, ATMCI_IER, ATMCI_NOTBUSY);
1842                                 state = STATE_WAITING_NOTBUSY;
1843                         } else if (host->mrq->stop) {
1844                                 atmci_send_stop_cmd(host, data);
1845                                 state = STATE_SENDING_STOP;
1846                         } else {
1847                                 host->data = NULL;
1848                                 data->bytes_xfered = data->blocks * data->blksz;
1849                                 data->error = 0;
1850                                 state = STATE_END_REQUEST;
1851                         }
1852                         break;
1853
1854                 case STATE_WAITING_NOTBUSY:
1855                         /*
1856                          * We can be in this state for two reasons: a command
1857                          * that requires waiting for the not-busy signal (stop
1858                          * command included) or a write operation. In the
1859                          * latter case, we need to send a stop command.
1860                          */
1861                         dev_dbg(dev, "FSM: not busy?\n");
1862                         if (!atmci_test_and_clear_pending(host,
1863                                                 EVENT_NOTBUSY))
1864                                 break;
1865
1866                         dev_dbg(dev, "set completed not busy\n");
1867                         atmci_set_completed(host, EVENT_NOTBUSY);
1868
1869                         if (host->data) {
1870                                 /*
1871                                  * For some commands such as CMD53, even if
1872                                  * there is data transfer, there is no stop
1873                                  * command to send.
1874                                  */
1875                                 if (host->mrq->stop) {
1876                                         atmci_send_stop_cmd(host, data);
1877                                         state = STATE_SENDING_STOP;
1878                                 } else {
1879                                         host->data = NULL;
1880                                         data->bytes_xfered = data->blocks
1881                                                              * data->blksz;
1882                                         data->error = 0;
1883                                         state = STATE_END_REQUEST;
1884                                 }
1885                         } else
1886                                 state = STATE_END_REQUEST;
1887                         break;
1888
1889                 case STATE_SENDING_STOP:
1890                         /*
1891                          * In this state, it is important to set host->data to
1892                          * NULL (which is tested in the waiting notbusy state)
1893                          * in order to go to the end request state instead of
1894                          * sending stop again.
1895                          */
1896                         dev_dbg(dev, "FSM: cmd ready?\n");
1897                         if (!atmci_test_and_clear_pending(host,
1898                                                 EVENT_CMD_RDY))
1899                                 break;
1900
1901                         dev_dbg(dev, "FSM: cmd ready\n");
1902                         host->cmd = NULL;
1903                         data->bytes_xfered = data->blocks * data->blksz;
1904                         data->error = 0;
1905                         atmci_command_complete(host, mrq->stop);
1906                         if (mrq->stop->error) {
1907                                 host->stop_transfer(host);
1908                                 atmci_writel(host, ATMCI_IDR,
1909                                              ATMCI_TXRDY | ATMCI_RXRDY
1910                                              | ATMCI_DATA_ERROR_FLAGS);
1911                                 state = STATE_END_REQUEST;
1912                         } else {
1913                                 atmci_writel(host, ATMCI_IER, ATMCI_NOTBUSY);
1914                                 state = STATE_WAITING_NOTBUSY;
1915                         }
1916                         host->data = NULL;
1917                         break;
1918
1919                 case STATE_END_REQUEST:
1920                         atmci_writel(host, ATMCI_IDR, ATMCI_TXRDY | ATMCI_RXRDY
1921                                            | ATMCI_DATA_ERROR_FLAGS);
1922                         status = host->data_status;
1923                         if (unlikely(status)) {
1924                                 host->stop_transfer(host);
1925                                 host->data = NULL;
1926                                 if (data) {
1927                                         if (status & ATMCI_DTOE) {
1928                                                 data->error = -ETIMEDOUT;
1929                                         } else if (status & ATMCI_DCRCE) {
1930                                                 data->error = -EILSEQ;
1931                                         } else {
1932                                                 data->error = -EIO;
1933                                         }
1934                                 }
1935                         }
1936
1937                         atmci_request_end(host, host->mrq);
1938                         goto unlock; /* atmci_request_end() sets host->state */
1939                         break;
1940                 }
1941         } while (state != prev_state);
1942
1943         host->state = state;
1944
1945 unlock:
1946         spin_unlock(&host->lock);
1947 }
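
/*
 * Illustrative summary of the state machine driven by atmci_work_func()
 * above (simplified, error paths omitted): SENDING_CMD moves to DATA_XFER
 * when there is data, or to WAITING_NOTBUSY for busy-signalling commands;
 * DATA_XFER moves to WAITING_NOTBUSY (writes) or SENDING_STOP (reads with a
 * stop command); everything eventually lands in END_REQUEST.  A hypothetical
 * helper for pretty-printing states in debug output, not used by the driver:
 */
static inline const char *atmci_example_state_name(enum atmel_mci_state state)
{
        switch (state) {
        case STATE_IDLE:                return "IDLE";
        case STATE_SENDING_CMD:         return "SENDING_CMD";
        case STATE_DATA_XFER:           return "DATA_XFER";
        case STATE_WAITING_NOTBUSY:     return "WAITING_NOTBUSY";
        case STATE_SENDING_STOP:        return "SENDING_STOP";
        case STATE_END_REQUEST:         return "END_REQUEST";
        }
        return "UNKNOWN";
}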
1948
1949 static void atmci_read_data_pio(struct atmel_mci *host)
1950 {
1951         struct scatterlist      *sg = host->sg;
1952         unsigned int            offset = host->pio_offset;
1953         struct mmc_data         *data = host->data;
1954         u32                     value;
1955         u32                     status;
1956         unsigned int            nbytes = 0;
1957
1958         do {
1959                 value = atmci_readl(host, ATMCI_RDR);
1960                 if (likely(offset + 4 <= sg->length)) {
1961                         sg_pcopy_from_buffer(sg, 1, &value, sizeof(u32), offset);
1962
1963                         offset += 4;
1964                         nbytes += 4;
1965
1966                         if (offset == sg->length) {
1967                                 flush_dcache_page(sg_page(sg));
1968                                 host->sg = sg = sg_next(sg);
1969                                 host->sg_len--;
1970                                 if (!sg || !host->sg_len)
1971                                         goto done;
1972
1973                                 offset = 0;
1974                         }
1975                 } else {
1976                         unsigned int remaining = sg->length - offset;
1977
1978                         sg_pcopy_from_buffer(sg, 1, &value, remaining, offset);
1979                         nbytes += remaining;
1980
1981                         flush_dcache_page(sg_page(sg));
1982                         host->sg = sg = sg_next(sg);
1983                         host->sg_len--;
1984                         if (!sg || !host->sg_len)
1985                                 goto done;
1986
1987                         offset = 4 - remaining;
1988                         sg_pcopy_from_buffer(sg, 1, (u8 *)&value + remaining,
1989                                         offset, 0);
1990                         nbytes += offset;
1991                 }
1992
1993                 status = atmci_readl(host, ATMCI_SR);
1994                 if (status & ATMCI_DATA_ERROR_FLAGS) {
1995                         atmci_writel(host, ATMCI_IDR, (ATMCI_NOTBUSY | ATMCI_RXRDY
1996                                                 | ATMCI_DATA_ERROR_FLAGS));
1997                         host->data_status = status;
1998                         data->bytes_xfered += nbytes;
1999                         return;
2000                 }
2001         } while (status & ATMCI_RXRDY);
2002
2003         host->pio_offset = offset;
2004         data->bytes_xfered += nbytes;
2005
2006         return;
2007
2008 done:
2009         atmci_writel(host, ATMCI_IDR, ATMCI_RXRDY);
2010         atmci_writel(host, ATMCI_IER, ATMCI_NOTBUSY);
2011         data->bytes_xfered += nbytes;
2012         smp_wmb();
2013         atmci_set_pending(host, EVENT_XFER_COMPLETE);
2014 }
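
/*
 * Illustrative sketch of the split-word handling in the PIO paths: when a
 * 32-bit FIFO word straddles the end of a scatterlist entry, its first
 * 'remaining' bytes belong to the tail of the current entry and the other
 * '4 - remaining' bytes go to offset 0 of the next one.  A plain-buffer
 * equivalent (hypothetical helper, not used by the driver):
 */
static inline void atmci_example_split_word(u32 value, u8 *tail,
                unsigned int remaining, u8 *next)
{
        memcpy(tail, &value, remaining);                        /* end of current entry */
        memcpy(next, (u8 *)&value + remaining, 4 - remaining);  /* start of next entry  */
}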
2015
2016 static void atmci_write_data_pio(struct atmel_mci *host)
2017 {
2018         struct scatterlist      *sg = host->sg;
2019         unsigned int            offset = host->pio_offset;
2020         struct mmc_data         *data = host->data;
2021         u32                     value;
2022         u32                     status;
2023         unsigned int            nbytes = 0;
2024
2025         do {
2026                 if (likely(offset + 4 <= sg->length)) {
2027                         sg_pcopy_to_buffer(sg, 1, &value, sizeof(u32), offset);
2028                         atmci_writel(host, ATMCI_TDR, value);
2029
2030                         offset += 4;
2031                         nbytes += 4;
2032                         if (offset == sg->length) {
2033                                 host->sg = sg = sg_next(sg);
2034                                 host->sg_len--;
2035                                 if (!sg || !host->sg_len)
2036                                         goto done;
2037
2038                                 offset = 0;
2039                         }
2040                 } else {
2041                         unsigned int remaining = sg->length - offset;
2042
2043                         value = 0;
2044                         sg_pcopy_to_buffer(sg, 1, &value, remaining, offset);
2045                         nbytes += remaining;
2046
2047                         host->sg = sg = sg_next(sg);
2048                         host->sg_len--;
2049                         if (!sg || !host->sg_len) {
2050                                 atmci_writel(host, ATMCI_TDR, value);
2051                                 goto done;
2052                         }
2053
2054                         offset = 4 - remaining;
2055                         sg_pcopy_to_buffer(sg, 1, (u8 *)&value + remaining,
2056                                         offset, 0);
2057                         atmci_writel(host, ATMCI_TDR, value);
2058                         nbytes += offset;
2059                 }
2060
2061                 status = atmci_readl(host, ATMCI_SR);
2062                 if (status & ATMCI_DATA_ERROR_FLAGS) {
2063                         atmci_writel(host, ATMCI_IDR, (ATMCI_NOTBUSY | ATMCI_TXRDY
2064                                                 | ATMCI_DATA_ERROR_FLAGS));
2065                         host->data_status = status;
2066                         data->bytes_xfered += nbytes;
2067                         return;
2068                 }
2069         } while (status & ATMCI_TXRDY);
2070
2071         host->pio_offset = offset;
2072         data->bytes_xfered += nbytes;
2073
2074         return;
2075
2076 done:
2077         atmci_writel(host, ATMCI_IDR, ATMCI_TXRDY);
2078         atmci_writel(host, ATMCI_IER, ATMCI_NOTBUSY);
2079         data->bytes_xfered += nbytes;
2080         smp_wmb();
2081         atmci_set_pending(host, EVENT_XFER_COMPLETE);
2082 }
2083
2084 static void atmci_sdio_interrupt(struct atmel_mci *host, u32 status)
2085 {
2086         int     i;
2087
2088         for (i = 0; i < ATMCI_MAX_NR_SLOTS; i++) {
2089                 struct atmel_mci_slot *slot = host->slot[i];
2090                 if (slot && (status & slot->sdio_irq)) {
2091                         mmc_signal_sdio_irq(slot->mmc);
2092                 }
2093         }
2094 }
2095
2096
2097 static irqreturn_t atmci_interrupt(int irq, void *dev_id)
2098 {
2099         struct atmel_mci        *host = dev_id;
2100         struct device           *dev = host->dev;
2101         u32                     status, mask, pending;
2102         unsigned int            pass_count = 0;
2103
2104         do {
2105                 status = atmci_readl(host, ATMCI_SR);
2106                 mask = atmci_readl(host, ATMCI_IMR);
2107                 pending = status & mask;
2108                 if (!pending)
2109                         break;
2110
2111                 if (pending & ATMCI_DATA_ERROR_FLAGS) {
2112                         dev_dbg(dev, "IRQ: data error\n");
2113                         atmci_writel(host, ATMCI_IDR, ATMCI_DATA_ERROR_FLAGS
2114                                         | ATMCI_RXRDY | ATMCI_TXRDY
2115                                         | ATMCI_ENDRX | ATMCI_ENDTX
2116                                         | ATMCI_RXBUFF | ATMCI_TXBUFE);
2117
2118                         host->data_status = status;
2119                         dev_dbg(dev, "set pending data error\n");
2120                         smp_wmb();
2121                         atmci_set_pending(host, EVENT_DATA_ERROR);
2122                         queue_work(system_bh_wq, &host->bh_work);
2123                 }
2124
2125                 if (pending & ATMCI_TXBUFE) {
2126                         dev_dbg(dev, "IRQ: tx buffer empty\n");
2127                         atmci_writel(host, ATMCI_IDR, ATMCI_TXBUFE);
2128                         atmci_writel(host, ATMCI_IDR, ATMCI_ENDTX);
2129                         /*
2130                          * We can receive this interrupt before the second PDC
2131                          * buffer has been configured, so we need to reconfigure
2132                          * both the first and second buffers again.
2133                          */
2134                         if (host->data_size) {
2135                                 atmci_pdc_set_both_buf(host, XFER_TRANSMIT);
2136                                 atmci_writel(host, ATMCI_IER, ATMCI_ENDTX);
2137                                 atmci_writel(host, ATMCI_IER, ATMCI_TXBUFE);
2138                         } else {
2139                                 atmci_pdc_complete(host);
2140                         }
2141                 } else if (pending & ATMCI_ENDTX) {
2142                         dev_dbg(dev, "IRQ: end of tx buffer\n");
2143                         atmci_writel(host, ATMCI_IDR, ATMCI_ENDTX);
2144
2145                         if (host->data_size) {
2146                                 atmci_pdc_set_single_buf(host,
2147                                                 XFER_TRANSMIT, PDC_SECOND_BUF);
2148                                 atmci_writel(host, ATMCI_IER, ATMCI_ENDTX);
2149                         }
2150                 }
2151
2152                 if (pending & ATMCI_RXBUFF) {
2153                         dev_dbg(dev, "IRQ: rx buffer full\n");
2154                         atmci_writel(host, ATMCI_IDR, ATMCI_RXBUFF);
2155                         atmci_writel(host, ATMCI_IDR, ATMCI_ENDRX);
2156                         /*
2157                          * We can receive this interrupt before the second PDC
2158                          * buffer has been configured, so we need to reconfigure
2159                          * both the first and second buffers again.
2160                          */
2161                         if (host->data_size) {
2162                                 atmci_pdc_set_both_buf(host, XFER_RECEIVE);
2163                                 atmci_writel(host, ATMCI_IER, ATMCI_ENDRX);
2164                                 atmci_writel(host, ATMCI_IER, ATMCI_RXBUFF);
2165                         } else {
2166                                 atmci_pdc_complete(host);
2167                         }
2168                 } else if (pending & ATMCI_ENDRX) {
2169                         dev_dbg(dev, "IRQ: end of rx buffer\n");
2170                         atmci_writel(host, ATMCI_IDR, ATMCI_ENDRX);
2171
2172                         if (host->data_size) {
2173                                 atmci_pdc_set_single_buf(host,
2174                                                 XFER_RECEIVE, PDC_SECOND_BUF);
2175                                 atmci_writel(host, ATMCI_IER, ATMCI_ENDRX);
2176                         }
2177                 }
2178
2179                 /*
2180                  * Early MCI IP revisions, mainly the ones with a PDC, have
2181                  * issues with the not-busy signal: it is not asserted after
2182                  * a data transmission unless a stop command has been sent.
2183                  * The appropriate workaround is to use the BLKE signal.
2184                  */
2185                 if (pending & ATMCI_BLKE) {
2186                         dev_dbg(dev, "IRQ: blke\n");
2187                         atmci_writel(host, ATMCI_IDR, ATMCI_BLKE);
2188                         smp_wmb();
2189                         dev_dbg(dev, "set pending notbusy\n");
2190                         atmci_set_pending(host, EVENT_NOTBUSY);
2191                         queue_work(system_bh_wq, &host->bh_work);
2192                 }
2193
2194                 if (pending & ATMCI_NOTBUSY) {
2195                         dev_dbg(dev, "IRQ: not_busy\n");
2196                         atmci_writel(host, ATMCI_IDR, ATMCI_NOTBUSY);
2197                         smp_wmb();
2198                         dev_dbg(dev, "set pending notbusy\n");
2199                         atmci_set_pending(host, EVENT_NOTBUSY);
2200                         queue_work(system_bh_wq, &host->bh_work);
2201                 }
2202
2203                 if (pending & ATMCI_RXRDY)
2204                         atmci_read_data_pio(host);
2205                 if (pending & ATMCI_TXRDY)
2206                         atmci_write_data_pio(host);
2207
2208                 if (pending & ATMCI_CMDRDY) {
2209                         dev_dbg(dev, "IRQ: cmd ready\n");
2210                         atmci_writel(host, ATMCI_IDR, ATMCI_CMDRDY);
2211                         host->cmd_status = status;
2212                         smp_wmb();
2213                         dev_dbg(dev, "set pending cmd rdy\n");
2214                         atmci_set_pending(host, EVENT_CMD_RDY);
2215                         queue_work(system_bh_wq, &host->bh_work);
2216                 }
2217
2218                 if (pending & (ATMCI_SDIOIRQA | ATMCI_SDIOIRQB))
2219                         atmci_sdio_interrupt(host, status);
2220
2221         } while (pass_count++ < 5);
2222
2223         return pass_count ? IRQ_HANDLED : IRQ_NONE;
2224 }
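
/*
 * Illustrative sketch of the PDC ping-pong reload handled by the interrupt
 * handler above (hypothetical helper, not used by the driver): once
 * ATMCI_ENDRX/ATMCI_ENDTX fires the controller has switched to the "next"
 * buffer, so the freed slot is reprogrammed through the PDC next-pointer and
 * next-counter registers while the transfer keeps running.  The real work is
 * done by atmci_pdc_set_single_buf() and atmci_pdc_set_both_buf() earlier in
 * this file.
 */
static inline void atmci_example_pdc_reload_rx(struct atmel_mci *host,
                u32 buf_addr, u32 count)
{
        atmci_writel(host, ATMEL_PDC_RNPR, buf_addr);   /* next receive pointer */
        atmci_writel(host, ATMEL_PDC_RNCR, count);      /* next receive counter */
}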
2225
2226 static irqreturn_t atmci_detect_interrupt(int irq, void *dev_id)
2227 {
2228         struct atmel_mci_slot   *slot = dev_id;
2229
2230         /*
2231          * Disable interrupts until the pin has stabilized and check
2232          * the state then. Use mod_timer() since we may be in the
2233          * middle of the timer routine when this interrupt triggers.
2234          */
2235         disable_irq_nosync(irq);
2236         mod_timer(&slot->detect_timer, jiffies + msecs_to_jiffies(20));
2237
2238         return IRQ_HANDLED;
2239 }
2240
2241 static int atmci_init_slot(struct atmel_mci *host,
2242                 struct mci_slot_pdata *slot_data, unsigned int id,
2243                 u32 sdc_reg, u32 sdio_irq)
2244 {
2245         struct device                   *dev = host->dev;
2246         struct mmc_host                 *mmc;
2247         struct atmel_mci_slot           *slot;
2248         int ret;
2249
2250         mmc = mmc_alloc_host(sizeof(struct atmel_mci_slot), dev);
2251         if (!mmc)
2252                 return -ENOMEM;
2253
2254         slot = mmc_priv(mmc);
2255         slot->mmc = mmc;
2256         slot->host = host;
2257         slot->detect_pin = slot_data->detect_pin;
2258         slot->wp_pin = slot_data->wp_pin;
2259         slot->sdc_reg = sdc_reg;
2260         slot->sdio_irq = sdio_irq;
2261
2262         dev_dbg(&mmc->class_dev,
2263                 "slot[%u]: bus_width=%u, detect_pin=%d, "
2264                 "detect_is_active_high=%s, wp_pin=%d\n",
2265                 id, slot_data->bus_width, desc_to_gpio(slot_data->detect_pin),
2266                 !gpiod_is_active_low(slot_data->detect_pin) ? "true" : "false",
2267                 desc_to_gpio(slot_data->wp_pin));
2268
2269         mmc->ops = &atmci_ops;
2270         mmc->f_min = DIV_ROUND_UP(host->bus_hz, 512);
2271         mmc->f_max = host->bus_hz / 2;
2272         mmc->ocr_avail  = MMC_VDD_32_33 | MMC_VDD_33_34;
2273         if (sdio_irq)
2274                 mmc->caps |= MMC_CAP_SDIO_IRQ;
2275         if (host->caps.has_highspeed)
2276                 mmc->caps |= MMC_CAP_SD_HIGHSPEED;
2277         /*
2278          * Without the read/write proof capability, it is strongly suggested
2279          * to use only a 1-bit data bus to prevent FIFO underruns and
2280          * overruns, which would corrupt data.
2281          */
2282         if ((slot_data->bus_width >= 4) && host->caps.has_rwproof) {
2283                 mmc->caps |= MMC_CAP_4_BIT_DATA;
2284                 if (slot_data->bus_width >= 8)
2285                         mmc->caps |= MMC_CAP_8_BIT_DATA;
2286         }
2287
2288         if (atmci_get_version(host) < 0x200) {
2289                 mmc->max_segs = 256;
2290                 mmc->max_blk_size = 4095;
2291                 mmc->max_blk_count = 256;
2292                 mmc->max_req_size = mmc->max_blk_size * mmc->max_blk_count;
2293                 mmc->max_seg_size = mmc->max_blk_size * mmc->max_segs;
2294         } else {
2295                 mmc->max_segs = 64;
2296                 mmc->max_req_size = 32768 * 512;
2297                 mmc->max_blk_size = 32768;
2298                 mmc->max_blk_count = 512;
2299         }
2300
2301         /* Assume card is present initially */
2302         set_bit(ATMCI_CARD_PRESENT, &slot->flags);
2303         if (slot->detect_pin) {
2304                 if (!gpiod_get_value_cansleep(slot->detect_pin))
2305                         clear_bit(ATMCI_CARD_PRESENT, &slot->flags);
2306         } else {
2307                 dev_dbg(&mmc->class_dev, "no detect pin available\n");
2308         }
2309
2310         if (!slot->detect_pin) {
2311                 if (slot_data->non_removable)
2312                         mmc->caps |= MMC_CAP_NONREMOVABLE;
2313                 else
2314                         mmc->caps |= MMC_CAP_NEEDS_POLL;
2315         }
2316
2317         if (!slot->wp_pin)
2318                 dev_dbg(&mmc->class_dev, "no WP pin available\n");
2319
2320         host->slot[id] = slot;
2321         mmc_regulator_get_supply(mmc);
2322         ret = mmc_add_host(mmc);
2323         if (ret) {
2324                 mmc_free_host(mmc);
2325                 return ret;
2326         }
2327
2328         if (slot->detect_pin) {
2329                 timer_setup(&slot->detect_timer, atmci_detect_change, 0);
2330
2331                 ret = request_irq(gpiod_to_irq(slot->detect_pin),
2332                                   atmci_detect_interrupt,
2333                                   IRQF_TRIGGER_FALLING | IRQF_TRIGGER_RISING,
2334                                   "mmc-detect", slot);
2335                 if (ret) {
2336                         dev_dbg(&mmc->class_dev,
2337                                 "could not request IRQ %d for detect pin\n",
2338                                 gpiod_to_irq(slot->detect_pin));
2339                         slot->detect_pin = NULL;
2340                 }
2341         }
2342
2343         atmci_init_debugfs(slot);
2344
2345         return 0;
2346 }
2347
2348 static void atmci_cleanup_slot(struct atmel_mci_slot *slot,
2349                 unsigned int id)
2350 {
2351         /* Debugfs stuff is cleaned up by mmc core */
2352
2353         set_bit(ATMCI_SHUTDOWN, &slot->flags);
2354         smp_wmb();
2355
2356         mmc_remove_host(slot->mmc);
2357
2358         if (slot->detect_pin) {
2359                 free_irq(gpiod_to_irq(slot->detect_pin), slot);
2360                 del_timer_sync(&slot->detect_timer);
2361         }
2362
2363         slot->host->slot[id] = NULL;
2364         mmc_free_host(slot->mmc);
2365 }
2366
2367 static int atmci_configure_dma(struct atmel_mci *host)
2368 {
2369         struct device *dev = host->dev;
2370
2371         host->dma.chan = dma_request_chan(dev, "rxtx");
2372         if (IS_ERR(host->dma.chan))
2373                 return PTR_ERR(host->dma.chan);
2374
2375         dev_info(dev, "using %s for DMA transfers\n", dma_chan_name(host->dma.chan));
2376
2377         host->dma_conf.src_addr = host->mapbase + ATMCI_RDR;
2378         host->dma_conf.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
2379         host->dma_conf.src_maxburst = 1;
2380         host->dma_conf.dst_addr = host->mapbase + ATMCI_TDR;
2381         host->dma_conf.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
2382         host->dma_conf.dst_maxburst = 1;
2383         host->dma_conf.device_fc = false;
2384
2385         return 0;
2386 }
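
/*
 * Illustrative sketch of how the configuration above is consumed (condensed;
 * the actual DMA submit path earlier in this file picks the direction per
 * request before preparing the slave sg descriptor):
 */
static inline int atmci_example_apply_dma_conf(struct atmel_mci *host,
                bool is_read)
{
        host->dma_conf.direction = is_read ? DMA_DEV_TO_MEM : DMA_MEM_TO_DEV;

        /* Hand src/dst addresses, widths and bursts to the DMA controller driver */
        return dmaengine_slave_config(host->dma.chan, &host->dma_conf);
}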
2387
2388 /*
2389  * The HSMCI (High Speed MCI) module is not fully compatible with the MCI
2390  * module: HSMCI provides DMA support and a new configuration register, but
2391  * it no longer supports the PDC.
2392  */
2393 static void atmci_get_cap(struct atmel_mci *host)
2394 {
2395         struct device *dev = host->dev;
2396         unsigned int version;
2397
2398         version = atmci_get_version(host);
2399         dev_info(dev, "version: 0x%x\n", version);
2400
2401         host->caps.has_dma_conf_reg = false;
2402         host->caps.has_pdc = true;
2403         host->caps.has_cfg_reg = false;
2404         host->caps.has_cstor_reg = false;
2405         host->caps.has_highspeed = false;
2406         host->caps.has_rwproof = false;
2407         host->caps.has_odd_clk_div = false;
2408         host->caps.has_bad_data_ordering = true;
2409         host->caps.need_reset_after_xfer = true;
2410         host->caps.need_blksz_mul_4 = true;
2411         host->caps.need_notbusy_for_read_ops = false;
2412
2413         /* keep only major version number */
2414         switch (version & 0xf00) {
2415         case 0x600:
2416         case 0x500:
2417                 host->caps.has_odd_clk_div = true;
2418                 fallthrough;
2419         case 0x400:
2420         case 0x300:
2421                 host->caps.has_dma_conf_reg = true;
2422                 host->caps.has_pdc = false;
2423                 host->caps.has_cfg_reg = true;
2424                 host->caps.has_cstor_reg = true;
2425                 host->caps.has_highspeed = true;
2426                 fallthrough;
2427         case 0x200:
2428                 host->caps.has_rwproof = true;
2429                 host->caps.need_blksz_mul_4 = false;
2430                 host->caps.need_notbusy_for_read_ops = true;
2431                 fallthrough;
2432         case 0x100:
2433                 host->caps.has_bad_data_ordering = false;
2434                 host->caps.need_reset_after_xfer = false;
2435                 fallthrough;
2436         case 0x0:
2437                 break;
2438         default:
2439                 host->caps.has_pdc = false;
2440                 dev_warn(dev, "Unmanaged mci version, set minimum capabilities\n");
2441                 break;
2442         }
2443 }
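
/*
 * Illustrative helper (hypothetical, not used by the driver) derived from the
 * switch above: revisions 0x300 and later are the HSMCI flavour, with DMA, a
 * configuration register and high-speed support but no PDC.
 */
static inline bool atmci_example_is_hsmci(u32 version)
{
        return (version & 0xf00) >= 0x300;
}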
2444
2445 static int atmci_probe(struct platform_device *pdev)
2446 {
2447         struct device                   *dev = &pdev->dev;
2448         struct atmel_mci                *host;
2449         struct resource                 *regs;
2450         unsigned int                    nr_slots;
2451         int                             irq;
2452         int                             ret, i;
2453
2454         regs = platform_get_resource(pdev, IORESOURCE_MEM, 0);
2455         if (!regs)
2456                 return -ENXIO;
2457
2458         irq = platform_get_irq(pdev, 0);
2459         if (irq < 0)
2460                 return irq;
2461
2462         host = devm_kzalloc(dev, sizeof(*host), GFP_KERNEL);
2463         if (!host)
2464                 return -ENOMEM;
2465
2466         host->dev = dev;
2467         spin_lock_init(&host->lock);
2468         INIT_LIST_HEAD(&host->queue);
2469
2470         ret = atmci_of_init(host);
2471         if (ret)
2472                 return dev_err_probe(dev, ret, "Slot information not available\n");
2473
2474         host->mck = devm_clk_get(dev, "mci_clk");
2475         if (IS_ERR(host->mck))
2476                 return PTR_ERR(host->mck);
2477
2478         host->regs = devm_ioremap(dev, regs->start, resource_size(regs));
2479         if (!host->regs)
2480                 return -ENOMEM;
2481
2482         ret = clk_prepare_enable(host->mck);
2483         if (ret)
2484                 return ret;
2485
2486         atmci_writel(host, ATMCI_CR, ATMCI_CR_SWRST);
2487         host->bus_hz = clk_get_rate(host->mck);
2488
2489         host->mapbase = regs->start;
2490
2491         INIT_WORK(&host->bh_work, atmci_work_func);
2492
2493         ret = request_irq(irq, atmci_interrupt, 0, dev_name(dev), host);
2494         if (ret) {
2495                 clk_disable_unprepare(host->mck);
2496                 return ret;
2497         }
2498
2499         /* Get MCI capabilities and set operations according to it */
2500         atmci_get_cap(host);
2501         ret = atmci_configure_dma(host);
2502         if (ret == -EPROBE_DEFER)
2503                 goto err_dma_probe_defer;
2504         if (ret == 0) {
2505                 host->prepare_data = &atmci_prepare_data_dma;
2506                 host->submit_data = &atmci_submit_data_dma;
2507                 host->stop_transfer = &atmci_stop_transfer_dma;
2508         } else if (host->caps.has_pdc) {
2509                 dev_info(dev, "using PDC\n");
2510                 host->prepare_data = &atmci_prepare_data_pdc;
2511                 host->submit_data = &atmci_submit_data_pdc;
2512                 host->stop_transfer = &atmci_stop_transfer_pdc;
2513         } else {
2514                 dev_info(dev, "using PIO\n");
2515                 host->prepare_data = &atmci_prepare_data;
2516                 host->submit_data = &atmci_submit_data;
2517                 host->stop_transfer = &atmci_stop_transfer;
2518         }
2519
2520         platform_set_drvdata(pdev, host);
2521
2522         timer_setup(&host->timer, atmci_timeout_timer, 0);
2523
2524         pm_runtime_get_noresume(dev);
2525         pm_runtime_set_active(dev);
2526         pm_runtime_set_autosuspend_delay(dev, AUTOSUSPEND_DELAY);
2527         pm_runtime_use_autosuspend(dev);
2528         pm_runtime_enable(dev);
2529
2530         /* We need at least one slot to succeed */
2531         nr_slots = 0;
2532         ret = -ENODEV;
2533         if (host->pdata[0].bus_width) {
2534                 ret = atmci_init_slot(host, &host->pdata[0],
2535                                 0, ATMCI_SDCSEL_SLOT_A, ATMCI_SDIOIRQA);
2536                 if (!ret) {
2537                         nr_slots++;
2538                         host->buf_size = host->slot[0]->mmc->max_req_size;
2539                 }
2540         }
2541         if (host->pdata[1].bus_width) {
2542                 ret = atmci_init_slot(host, &host->pdata[1],
2543                                 1, ATMCI_SDCSEL_SLOT_B, ATMCI_SDIOIRQB);
2544                 if (!ret) {
2545                         nr_slots++;
2546                         if (host->slot[1]->mmc->max_req_size > host->buf_size)
2547                                 host->buf_size =
2548                                         host->slot[1]->mmc->max_req_size;
2549                 }
2550         }
2551
2552         if (!nr_slots) {
2553                 dev_err_probe(dev, ret, "init failed: no slot defined\n");
2554                 goto err_init_slot;
2555         }
2556
2557         if (!host->caps.has_rwproof) {
2558                 host->buffer = dma_alloc_coherent(dev, host->buf_size,
2559                                                   &host->buf_phys_addr,
2560                                                   GFP_KERNEL);
2561                 if (!host->buffer) {
2562                         ret = dev_err_probe(dev, -ENOMEM, "buffer allocation failed\n");
2563                         goto err_dma_alloc;
2564                 }
2565         }
2566
2567         dev_info(dev, "Atmel MCI controller at 0x%08lx irq %d, %u slots\n",
2568                  host->mapbase, irq, nr_slots);
2569
2570         pm_runtime_mark_last_busy(dev);
2571         pm_runtime_put_autosuspend(dev);
2572
2573         return 0;
2574
2575 err_dma_alloc:
2576         for (i = 0; i < ATMCI_MAX_NR_SLOTS; i++) {
2577                 if (host->slot[i])
2578                         atmci_cleanup_slot(host->slot[i], i);
2579         }
2580 err_init_slot:
2581         clk_disable_unprepare(host->mck);
2582
2583         pm_runtime_disable(dev);
2584         pm_runtime_put_noidle(dev);
2585
2586         del_timer_sync(&host->timer);
2587         if (!IS_ERR(host->dma.chan))
2588                 dma_release_channel(host->dma.chan);
2589 err_dma_probe_defer:
2590         free_irq(irq, host);
2591         return ret;
2592 }
2593
2594 static void atmci_remove(struct platform_device *pdev)
2595 {
2596         struct atmel_mci        *host = platform_get_drvdata(pdev);
2597         struct device           *dev = &pdev->dev;
2598         unsigned int            i;
2599
2600         pm_runtime_get_sync(dev);
2601
2602         if (host->buffer)
2603                 dma_free_coherent(dev, host->buf_size, host->buffer, host->buf_phys_addr);
2604
2605         for (i = 0; i < ATMCI_MAX_NR_SLOTS; i++) {
2606                 if (host->slot[i])
2607                         atmci_cleanup_slot(host->slot[i], i);
2608         }
2609
2610         atmci_writel(host, ATMCI_IDR, ~0UL);
2611         atmci_writel(host, ATMCI_CR, ATMCI_CR_MCIDIS);
2612         atmci_readl(host, ATMCI_SR);
2613
2614         del_timer_sync(&host->timer);
2615         if (!IS_ERR(host->dma.chan))
2616                 dma_release_channel(host->dma.chan);
2617
2618         free_irq(platform_get_irq(pdev, 0), host);
2619
2620         clk_disable_unprepare(host->mck);
2621
2622         pm_runtime_disable(dev);
2623         pm_runtime_put_noidle(dev);
2624 }
2625
2626 #ifdef CONFIG_PM
2627 static int atmci_runtime_suspend(struct device *dev)
2628 {
2629         struct atmel_mci *host = dev_get_drvdata(dev);
2630
2631         clk_disable_unprepare(host->mck);
2632
2633         pinctrl_pm_select_sleep_state(dev);
2634
2635         return 0;
2636 }
2637
2638 static int atmci_runtime_resume(struct device *dev)
2639 {
2640         struct atmel_mci *host = dev_get_drvdata(dev);
2641
2642         pinctrl_select_default_state(dev);
2643
2644         return clk_prepare_enable(host->mck);
2645 }
2646 #endif
2647
2648 static const struct dev_pm_ops atmci_dev_pm_ops = {
2649         SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
2650                                 pm_runtime_force_resume)
2651         SET_RUNTIME_PM_OPS(atmci_runtime_suspend, atmci_runtime_resume, NULL)
2652 };
2653
2654 static struct platform_driver atmci_driver = {
2655         .probe          = atmci_probe,
2656         .remove         = atmci_remove,
2657         .driver         = {
2658                 .name           = "atmel_mci",
2659                 .probe_type     = PROBE_PREFER_ASYNCHRONOUS,
2660                 .of_match_table = atmci_dt_ids,
2661                 .pm             = &atmci_dev_pm_ops,
2662         },
2663 };
2664 module_platform_driver(atmci_driver);
2665
2666 MODULE_DESCRIPTION("Atmel Multimedia Card Interface driver");
2667 MODULE_AUTHOR("Haavard Skinnemoen (Atmel)");
2668 MODULE_LICENSE("GPL v2");