/*
 * Shared part of driver for MMC/SDHC controller on Cavium OCTEON and
 * ThunderX SOCs.
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 2012-2017 Cavium Inc.
 */
#include <linux/bitfield.h>
#include <linux/delay.h>
#include <linux/dma-direction.h>
#include <linux/dma-mapping.h>
#include <linux/gpio/consumer.h>
#include <linux/interrupt.h>
#include <linux/mmc/mmc.h>
#include <linux/mmc/slot-gpio.h>
#include <linux/module.h>
#include <linux/regulator/consumer.h>
#include <linux/scatterlist.h>
#include <linux/time.h>

#include "cavium.h"

const char *cvm_mmc_irq_names[] = {
        "mmc-buf-done",
        "mmc-cmd-done",
        "mmc-dma-done",
        "mmc-cmd-err",
        "mmc-dma-err",
        "mmc-switch-done",
        "mmc-switch-err",
        "mmc-ncb-flt",
        "mmc-ncb-ras",
};

/*
 * The Cavium MMC host hardware assumes that all commands have fixed
 * command and response types. These are correct if MMC devices are
 * being used. However, non-MMC devices like SD use command and
 * response types that are unexpected by the host hardware.
 *
 * The command and response types can be overridden by supplying an
 * XOR value that is applied to the type. We calculate the XOR value
 * from the values in this table and the flags passed from the MMC
 * core.
 */
static struct cvm_mmc_cr_type cvm_mmc_cr_types[] = {
        {0xff, 0xff},           /* CMD56 */
};

static struct cvm_mmc_cr_mods cvm_mmc_get_cr_mods(struct mmc_command *cmd)
{
        struct cvm_mmc_cr_type *cr;
        u8 hardware_ctype, hardware_rtype;
        u8 desired_ctype = 0, desired_rtype = 0;
        struct cvm_mmc_cr_mods r;

        cr = cvm_mmc_cr_types + (cmd->opcode & 0x3f);
        hardware_ctype = cr->ctype;
        hardware_rtype = cr->rtype;
        if (cmd->opcode == MMC_GEN_CMD)
                hardware_ctype = (cmd->arg & 1) ? 1 : 2;

        switch (mmc_cmd_type(cmd)) {
        case MMC_CMD_ADTC:
                desired_ctype = (cmd->data->flags & MMC_DATA_WRITE) ? 2 : 1;
                break;
        case MMC_CMD_AC:
        case MMC_CMD_BC:
        case MMC_CMD_BCR:
                desired_ctype = 0;
                break;
        }

        switch (mmc_resp_type(cmd)) {
        case MMC_RSP_NONE:
                desired_rtype = 0;
                break;
        case MMC_RSP_R1: /* MMC_RSP_R5, MMC_RSP_R6, MMC_RSP_R7 */
        case MMC_RSP_R1B:
                desired_rtype = 1;
                break;
        case MMC_RSP_R2:
                desired_rtype = 2;
                break;
        case MMC_RSP_R3: /* MMC_RSP_R4 */
                desired_rtype = 3;
                break;
        }
        r.ctype_xor = desired_ctype ^ hardware_ctype;
        r.rtype_xor = desired_rtype ^ hardware_rtype;
        return r;
}

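/*
 * Worked example (illustrative, not part of the upstream file): opcode 8
 * is SEND_EXT_CSD on MMC (an ADTC read with an R1 response, which is what
 * the table encodes) but SEND_IF_COND on SD (a broadcast command with an
 * R7 response). For the SD case desired_ctype is 0 while the table ctype
 * is 1, so ctype_xor = 0 ^ 1 = 1; R7 is transported like R1, so rtype_xor
 * stays 0. A zero XOR leaves the controller's fixed decoding unchanged.
 */
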
static void check_switch_errors(struct cvm_mmc_host *host)
{
        u64 emm_switch;

        emm_switch = readq(host->base + MIO_EMM_SWITCH(host));
        if (emm_switch & MIO_EMM_SWITCH_ERR0)
                dev_err(host->dev, "Switch power class error\n");
        if (emm_switch & MIO_EMM_SWITCH_ERR1)
                dev_err(host->dev, "Switch hs timing error\n");
        if (emm_switch & MIO_EMM_SWITCH_ERR2)
                dev_err(host->dev, "Switch bus width error\n");
}

static void clear_bus_id(u64 *reg)
{
        u64 bus_id_mask = GENMASK_ULL(61, 60);

        *reg &= ~bus_id_mask;
}

static void set_bus_id(u64 *reg, int bus_id)
{
        clear_bus_id(reg);
        *reg |= FIELD_PREP(GENMASK_ULL(61, 60), bus_id);
}

static int get_bus_id(u64 reg)
{
        return FIELD_GET(GENMASK_ULL(61, 60), reg);
}

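/*
 * Illustrative note: bits 61:60 select the bus (slot) in most MIO_EMM
 * register values, so set_bus_id(&reg, 2) is equivalent to
 * reg = (reg & ~GENMASK_ULL(61, 60)) | (2ULL << 60).
 */
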
/*
 * We never set the switch_exe bit since that would interfere
 * with the commands sent by the MMC core.
 */
static void do_switch(struct cvm_mmc_host *host, u64 emm_switch)
{
        int retries = 100;
        u64 rsp_sts;
        int bus_id;

        /*
         * Mode settings are only taken from slot 0. Work around that
         * hardware issue by switching to slot 0 first.
         */
        bus_id = get_bus_id(emm_switch);
        clear_bus_id(&emm_switch);
        writeq(emm_switch, host->base + MIO_EMM_SWITCH(host));

        set_bus_id(&emm_switch, bus_id);
        writeq(emm_switch, host->base + MIO_EMM_SWITCH(host));

        /* wait for the switch to finish */
        do {
                rsp_sts = readq(host->base + MIO_EMM_RSP_STS(host));
                if (!(rsp_sts & MIO_EMM_RSP_STS_SWITCH_VAL))
                        break;
                udelay(10);
        } while (--retries);

        check_switch_errors(host);
}

static bool switch_val_changed(struct cvm_mmc_slot *slot, u64 new_val)
{
        /* Match BUS_ID, HS_TIMING, BUS_WIDTH, POWER_CLASS, CLK_HI, CLK_LO */
        u64 match = 0x3001070fffffffffull;

        return (slot->cached_switch & match) != (new_val & match);
}

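/*
 * Illustrative decode of the match mask above, by MIO_EMM_SWITCH bit
 * position: 61:60 BUS_ID, 48 HS_TIMING, 42:40 BUS_WIDTH, 35:32
 * POWER_CLASS, 31:16 CLK_HI, 15:0 CLK_LO. The EXE and error bits are
 * deliberately excluded from the comparison.
 */
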
static void set_wdog(struct cvm_mmc_slot *slot, unsigned int ns)
{
        u64 timeout;

        if (!slot->clock)
                return;

        if (ns)
                timeout = (slot->clock * ns) / NSEC_PER_SEC;
        else
                timeout = (slot->clock * 850ull) / 1000ull;
        writeq(timeout, slot->host->base + MIO_EMM_WDOG(slot->host));
}

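/*
 * Worked example (illustrative): at slot->clock = 52 MHz, a request
 * timeout of 100 ms (ns = 100000000) programs
 * 52000000 * 100000000 / 1000000000 = 5200000 card-clock cycles into
 * MIO_EMM_WDOG; with ns == 0 the fallback is 850/1000 of a second,
 * i.e. 44200000 cycles.
 */
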
static void cvm_mmc_reset_bus(struct cvm_mmc_slot *slot)
{
        struct cvm_mmc_host *host = slot->host;
        u64 emm_switch, wdog;

        emm_switch = readq(slot->host->base + MIO_EMM_SWITCH(host));
        emm_switch &= ~(MIO_EMM_SWITCH_EXE | MIO_EMM_SWITCH_ERR0 |
                        MIO_EMM_SWITCH_ERR1 | MIO_EMM_SWITCH_ERR2);
        set_bus_id(&emm_switch, slot->bus_id);

        wdog = readq(slot->host->base + MIO_EMM_WDOG(host));
        do_switch(slot->host, emm_switch);

        slot->cached_switch = emm_switch;

        msleep(20);

        writeq(wdog, slot->host->base + MIO_EMM_WDOG(host));
}

/* Switch to another slot if needed */
static void cvm_mmc_switch_to(struct cvm_mmc_slot *slot)
{
        struct cvm_mmc_host *host = slot->host;
        struct cvm_mmc_slot *old_slot;
        u64 emm_sample, emm_switch;

        if (slot->bus_id == host->last_slot)
                return;

        if (host->last_slot >= 0 && host->slot[host->last_slot]) {
                old_slot = host->slot[host->last_slot];
                old_slot->cached_switch = readq(host->base + MIO_EMM_SWITCH(host));
                old_slot->cached_rca = readq(host->base + MIO_EMM_RCA(host));
        }

        writeq(slot->cached_rca, host->base + MIO_EMM_RCA(host));
        emm_switch = slot->cached_switch;
        set_bus_id(&emm_switch, slot->bus_id);
        do_switch(host, emm_switch);

        emm_sample = FIELD_PREP(MIO_EMM_SAMPLE_CMD_CNT, slot->cmd_cnt) |
                     FIELD_PREP(MIO_EMM_SAMPLE_DAT_CNT, slot->dat_cnt);
        writeq(emm_sample, host->base + MIO_EMM_SAMPLE(host));

        host->last_slot = slot->bus_id;
}

static void do_read(struct cvm_mmc_host *host, struct mmc_request *req,
                    u64 dbuf)
{
        struct sg_mapping_iter *smi = &host->smi;
        int data_len = req->data->blocks * req->data->blksz;
        int bytes_xfered, shift = -1;
        u64 dat = 0;

        /* Auto inc from offset zero */
        writeq((0x10000 | (dbuf << 6)), host->base + MIO_EMM_BUF_IDX(host));

        for (bytes_xfered = 0; bytes_xfered < data_len;) {
                if (smi->consumed >= smi->length) {
                        if (!sg_miter_next(smi))
                                break;
                        smi->consumed = 0;
                }

                if (shift < 0) {
                        dat = readq(host->base + MIO_EMM_BUF_DAT(host));
                        shift = 56;
                }

                while (smi->consumed < smi->length && shift >= 0) {
                        ((u8 *)smi->addr)[smi->consumed] = (dat >> shift) & 0xff;
                        bytes_xfered++;
                        smi->consumed++;
                        shift -= 8;
                }
        }

        sg_miter_stop(smi);
        req->data->bytes_xfered = bytes_xfered;
        req->data->error = 0;
}

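/*
 * Illustrative example: the 64-bit BUF_DAT word is consumed MSB first.
 * If the hardware buffer holds bytes b0..b7 of the sector, readq()
 * returns b0 in bits 63:56, so the loop stores (dat >> 56) & 0xff as the
 * first byte, (dat >> 48) & 0xff as the second, and so on down to shift
 * 0 before loading the next word.
 */
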
static void do_write(struct mmc_request *req)
{
        req->data->bytes_xfered = req->data->blocks * req->data->blksz;
        req->data->error = 0;
}

static void set_cmd_response(struct cvm_mmc_host *host, struct mmc_request *req,
                             u64 rsp_sts)
{
        u64 rsp_hi, rsp_lo;

        if (!(rsp_sts & MIO_EMM_RSP_STS_RSP_VAL))
                return;

        rsp_lo = readq(host->base + MIO_EMM_RSP_LO(host));

        switch (FIELD_GET(MIO_EMM_RSP_STS_RSP_TYPE, rsp_sts)) {
        case 1:
        case 3:
                req->cmd->resp[0] = (rsp_lo >> 8) & 0xffffffff;
                req->cmd->resp[1] = 0;
                req->cmd->resp[2] = 0;
                req->cmd->resp[3] = 0;
                break;
        case 2:
                req->cmd->resp[3] = rsp_lo & 0xffffffff;
                req->cmd->resp[2] = (rsp_lo >> 32) & 0xffffffff;
                rsp_hi = readq(host->base + MIO_EMM_RSP_HI(host));
                req->cmd->resp[1] = rsp_hi & 0xffffffff;
                req->cmd->resp[0] = (rsp_hi >> 32) & 0xffffffff;
                break;
        }
}

static int get_dma_dir(struct mmc_data *data)
{
        return (data->flags & MMC_DATA_WRITE) ? DMA_TO_DEVICE : DMA_FROM_DEVICE;
}

static int finish_dma_single(struct cvm_mmc_host *host, struct mmc_data *data)
{
        data->bytes_xfered = data->blocks * data->blksz;
        data->error = 0;
        dma_unmap_sg(host->dev, data->sg, data->sg_len, get_dma_dir(data));
        return 1;
}

static int finish_dma_sg(struct cvm_mmc_host *host, struct mmc_data *data)
{
        u64 fifo_cfg;
        int count;

        /* Check if there are any pending requests left */
        fifo_cfg = readq(host->dma_base + MIO_EMM_DMA_FIFO_CFG(host));
        count = FIELD_GET(MIO_EMM_DMA_FIFO_CFG_COUNT, fifo_cfg);
        if (count)
                dev_err(host->dev, "%u requests still pending\n", count);

        data->bytes_xfered = data->blocks * data->blksz;
        data->error = 0;

        /* Clear and disable FIFO */
        writeq(BIT_ULL(16), host->dma_base + MIO_EMM_DMA_FIFO_CFG(host));
        dma_unmap_sg(host->dev, data->sg, data->sg_len, get_dma_dir(data));
        return 1;
}

static int finish_dma(struct cvm_mmc_host *host, struct mmc_data *data)
{
        if (host->use_sg && data->sg_len > 1)
                return finish_dma_sg(host, data);
        else
                return finish_dma_single(host, data);
}

static int check_status(u64 rsp_sts)
{
        if (rsp_sts & MIO_EMM_RSP_STS_RSP_BAD_STS ||
            rsp_sts & MIO_EMM_RSP_STS_RSP_CRC_ERR ||
            rsp_sts & MIO_EMM_RSP_STS_BLK_CRC_ERR)
                return -EILSEQ;
        if (rsp_sts & MIO_EMM_RSP_STS_RSP_TIMEOUT ||
            rsp_sts & MIO_EMM_RSP_STS_BLK_TIMEOUT)
                return -ETIMEDOUT;
        if (rsp_sts & MIO_EMM_RSP_STS_DBUF_ERR)
                return -EIO;
        return 0;
}

/* Try to clean up failed DMA. */
static void cleanup_dma(struct cvm_mmc_host *host, u64 rsp_sts)
{
        u64 emm_dma;

        emm_dma = readq(host->base + MIO_EMM_DMA(host));
        emm_dma |= FIELD_PREP(MIO_EMM_DMA_VAL, 1) |
                   FIELD_PREP(MIO_EMM_DMA_DAT_NULL, 1);
        set_bus_id(&emm_dma, get_bus_id(rsp_sts));
        writeq(emm_dma, host->base + MIO_EMM_DMA(host));
}

irqreturn_t cvm_mmc_interrupt(int irq, void *dev_id)
{
        struct cvm_mmc_host *host = dev_id;
        struct mmc_request *req;
        u64 emm_int, rsp_sts;
        bool host_done;

        if (host->need_irq_handler_lock)
                spin_lock(&host->irq_handler_lock);
        else
                __acquire(&host->irq_handler_lock);

        /* Clear interrupt bits (write 1 clears). */
        emm_int = readq(host->base + MIO_EMM_INT(host));
        writeq(emm_int, host->base + MIO_EMM_INT(host));

        if (emm_int & MIO_EMM_INT_SWITCH_ERR)
                check_switch_errors(host);

        req = host->current_req;
        if (!req)
                goto out;

        rsp_sts = readq(host->base + MIO_EMM_RSP_STS(host));
        /*
         * dma_val set means DMA is still in progress. Don't touch
         * the request and wait for the interrupt indicating that
         * the DMA is finished.
         */
        if ((rsp_sts & MIO_EMM_RSP_STS_DMA_VAL) && host->dma_active)
                goto out;

        if (!host->dma_active && req->data &&
            (emm_int & MIO_EMM_INT_BUF_DONE)) {
                unsigned int type = (rsp_sts >> 7) & 3;

                if (type == 1)
                        do_read(host, req, rsp_sts & MIO_EMM_RSP_STS_DBUF);
                else if (type == 2)
                        do_write(req);
        }

        host_done = emm_int & MIO_EMM_INT_CMD_DONE ||
                    emm_int & MIO_EMM_INT_DMA_DONE ||
                    emm_int & MIO_EMM_INT_CMD_ERR ||
                    emm_int & MIO_EMM_INT_DMA_ERR;

        if (!(host_done && req->done))
                goto no_req_done;

        req->cmd->error = check_status(rsp_sts);

        if (host->dma_active && req->data)
                if (!finish_dma(host, req->data))
                        goto no_req_done;

        set_cmd_response(host, req, rsp_sts);
        if ((emm_int & MIO_EMM_INT_DMA_ERR) &&
            (rsp_sts & MIO_EMM_RSP_STS_DMA_PEND))
                cleanup_dma(host, rsp_sts);

        host->current_req = NULL;
        req->done(req);

no_req_done:
        if (host->dmar_fixup_done)
                host->dmar_fixup_done(host);
        if (host_done)
                host->release_bus(host);
out:
        if (host->need_irq_handler_lock)
                spin_unlock(&host->irq_handler_lock);
        else
                __release(&host->irq_handler_lock);
        return IRQ_RETVAL(emm_int != 0);
}

/*
 * Program DMA_CFG and, if needed, DMA_ADR.
 * Returns 0 on error, DMA address otherwise.
 */
static u64 prepare_dma_single(struct cvm_mmc_host *host, struct mmc_data *data)
{
        u64 dma_cfg, addr;
        int count, rw;

        count = dma_map_sg(host->dev, data->sg, data->sg_len,
                           get_dma_dir(data));
        if (!count)
                return 0;

        rw = (data->flags & MMC_DATA_WRITE) ? 1 : 0;
        dma_cfg = FIELD_PREP(MIO_EMM_DMA_CFG_EN, 1) |
                  FIELD_PREP(MIO_EMM_DMA_CFG_RW, rw);
#ifdef __LITTLE_ENDIAN
        dma_cfg |= FIELD_PREP(MIO_EMM_DMA_CFG_ENDIAN, 1);
#endif
        dma_cfg |= FIELD_PREP(MIO_EMM_DMA_CFG_SIZE,
                              (sg_dma_len(&data->sg[0]) / 8) - 1);

        addr = sg_dma_address(&data->sg[0]);
        if (!host->big_dma_addr)
                dma_cfg |= FIELD_PREP(MIO_EMM_DMA_CFG_ADR, addr);
        writeq(dma_cfg, host->dma_base + MIO_EMM_DMA_CFG(host));

        pr_debug("[%s] sg_dma_len: %u total sg_elem: %d\n",
                 (rw) ? "W" : "R", sg_dma_len(&data->sg[0]), count);

        if (host->big_dma_addr)
                writeq(addr, host->dma_base + MIO_EMM_DMA_ADR(host));
        return addr;
}

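/*
 * Worked example (illustrative): a single 4096-byte segment programs
 * MIO_EMM_DMA_CFG_SIZE = 4096 / 8 - 1 = 511, i.e. the field counts
 * 64-bit words minus one, which assumes segment lengths that are a
 * multiple of 8 bytes.
 */
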
/*
 * Queue the complete sg list into the FIFO.
 * Returns 0 on error, 1 otherwise.
 */
static u64 prepare_dma_sg(struct cvm_mmc_host *host, struct mmc_data *data)
{
        struct scatterlist *sg;
        u64 fifo_cmd, addr;
        int count, i, rw;

        count = dma_map_sg(host->dev, data->sg, data->sg_len,
                           get_dma_dir(data));
        if (!count)
                return 0;
        if (count > 16)
                goto error;

        /* Enable FIFO by removing CLR bit */
        writeq(0, host->dma_base + MIO_EMM_DMA_FIFO_CFG(host));

        for_each_sg(data->sg, sg, count, i) {
                /* Program DMA address */
                addr = sg_dma_address(sg);
                if (addr & 7)
                        goto error;
                writeq(addr, host->dma_base + MIO_EMM_DMA_FIFO_ADR(host));

                /*
                 * If we have scatter-gather support we also have an extra
                 * register for the DMA addr, so no need to check
                 * host->big_dma_addr here.
                 */
                rw = (data->flags & MMC_DATA_WRITE) ? 1 : 0;
                fifo_cmd = FIELD_PREP(MIO_EMM_DMA_FIFO_CMD_RW, rw);

                /* Enable interrupts on the last element */
                fifo_cmd |= FIELD_PREP(MIO_EMM_DMA_FIFO_CMD_INTDIS,
                                       (i + 1 == count) ? 0 : 1);

#ifdef __LITTLE_ENDIAN
                fifo_cmd |= FIELD_PREP(MIO_EMM_DMA_FIFO_CMD_ENDIAN, 1);
#endif
                fifo_cmd |= FIELD_PREP(MIO_EMM_DMA_FIFO_CMD_SIZE,
                                       sg_dma_len(sg) / 8 - 1);
                /*
                 * The write copies the address and the command to the FIFO
                 * and increments the FIFO's COUNT field.
                 */
                writeq(fifo_cmd, host->dma_base + MIO_EMM_DMA_FIFO_CMD(host));
                pr_debug("[%s] sg_dma_len: %u sg_elem: %d/%d\n",
                         (rw) ? "W" : "R", sg_dma_len(sg), i, count);
        }

        /*
         * Unlike prepare_dma_single, we don't return the address here, as
         * it would not make sense for scatter-gather. The DMA fixup is
         * only required on models that don't support scatter-gather, so
         * that is not a problem.
         */
        return 1;

error:
        WARN_ON_ONCE(1);
        dma_unmap_sg(host->dev, data->sg, data->sg_len, get_dma_dir(data));
        /* Clear and disable FIFO */
        writeq(BIT_ULL(16), host->dma_base + MIO_EMM_DMA_FIFO_CFG(host));
        return 0;
}

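/*
 * Illustrative note: the external DMA engine's FIFO holds at most 16
 * descriptors, which is why prepare_dma_sg() bails out for more than 16
 * mapped segments, and every segment address must be 8-byte aligned
 * (addr & 7). Writing BIT_ULL(16) (the CLR bit) both empties and
 * disables the FIFO again on the error path.
 */
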
static u64 prepare_dma(struct cvm_mmc_host *host, struct mmc_data *data)
{
        if (host->use_sg && data->sg_len > 1)
                return prepare_dma_sg(host, data);
        else
                return prepare_dma_single(host, data);
}

static u64 prepare_ext_dma(struct mmc_host *mmc, struct mmc_request *mrq)
{
        struct cvm_mmc_slot *slot = mmc_priv(mmc);
        u64 emm_dma;

        emm_dma = FIELD_PREP(MIO_EMM_DMA_VAL, 1) |
                  FIELD_PREP(MIO_EMM_DMA_SECTOR,
                             mmc_card_is_blockaddr(mmc->card) ? 1 : 0) |
                  FIELD_PREP(MIO_EMM_DMA_RW,
                             (mrq->data->flags & MMC_DATA_WRITE) ? 1 : 0) |
                  FIELD_PREP(MIO_EMM_DMA_BLOCK_CNT, mrq->data->blocks) |
                  FIELD_PREP(MIO_EMM_DMA_CARD_ADDR, mrq->cmd->arg);
        set_bus_id(&emm_dma, slot->bus_id);

        if (mmc_card_mmc(mmc->card) || (mmc_card_sd(mmc->card) &&
            (mmc->card->scr.cmds & SD_SCR_CMD23_SUPPORT)))
                emm_dma |= FIELD_PREP(MIO_EMM_DMA_MULTI, 1);

        pr_debug("[%s] blocks: %u multi: %d\n",
                 (emm_dma & MIO_EMM_DMA_RW) ? "W" : "R",
                 mrq->data->blocks, (emm_dma & MIO_EMM_DMA_MULTI) ? 1 : 0);
        return emm_dma;
}

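/*
 * Worked example (illustrative): a 16-block write to a block-addressed
 * (high-capacity) card yields VAL=1, SECTOR=1, RW=1, BLOCK_CNT=16 and
 * CARD_ADDR taken from mrq->cmd->arg. MULTI=1 is set for eMMC or
 * CMD23-capable SD cards, so the controller issues one multi-block
 * transfer instead of per-block commands.
 */
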
static void cvm_mmc_dma_request(struct mmc_host *mmc,
                                struct mmc_request *mrq)
{
        struct cvm_mmc_slot *slot = mmc_priv(mmc);
        struct cvm_mmc_host *host = slot->host;
        struct mmc_data *data;
        u64 emm_dma, addr;

        if (!mrq->data || !mrq->data->sg || !mrq->data->sg_len ||
            !mrq->stop || mrq->stop->opcode != MMC_STOP_TRANSMISSION) {
                dev_err(&mmc->card->dev, "Error: %s no data\n", __func__);
                goto error;
        }

        cvm_mmc_switch_to(slot);

        data = mrq->data;
        pr_debug("DMA request blocks: %d block_size: %d total_size: %d\n",
                 data->blocks, data->blksz, data->blocks * data->blksz);
        if (data->timeout_ns)
                set_wdog(slot, data->timeout_ns);

        WARN_ON(host->current_req);
        host->current_req = mrq;

        emm_dma = prepare_ext_dma(mmc, mrq);
        addr = prepare_dma(host, data);
        if (!addr) {
                dev_err(host->dev, "prepare_dma failed\n");
                goto error;
        }

        host->dma_active = true;
        host->int_enable(host, MIO_EMM_INT_CMD_ERR | MIO_EMM_INT_DMA_DONE |
                         MIO_EMM_INT_DMA_ERR);

        if (host->dmar_fixup)
                host->dmar_fixup(host, mrq->cmd, data, addr);

        /*
         * If we have a valid SD card in the slot, we set the response
         * bit mask to check for CRC errors and timeouts only.
         * Otherwise, use the default power reset value.
         */
        if (mmc_card_sd(mmc->card))
                writeq(0x00b00000ull, host->base + MIO_EMM_STS_MASK(host));
        else
                writeq(0xe4390080ull, host->base + MIO_EMM_STS_MASK(host));
        writeq(emm_dma, host->base + MIO_EMM_DMA(host));
        return;

error:
        mrq->cmd->error = -EINVAL;
        if (mrq->done)
                mrq->done(mrq);
        host->release_bus(host);
}

static void do_read_request(struct cvm_mmc_host *host, struct mmc_request *mrq)
{
        sg_miter_start(&host->smi, mrq->data->sg, mrq->data->sg_len,
                       SG_MITER_ATOMIC | SG_MITER_TO_SG);
}

static void do_write_request(struct cvm_mmc_host *host, struct mmc_request *mrq)
{
        unsigned int data_len = mrq->data->blocks * mrq->data->blksz;
        struct sg_mapping_iter *smi = &host->smi;
        unsigned int bytes_xfered;
        int shift = 56;
        u64 dat = 0;

        /* Copy data to the xmit buffer before issuing the command. */
        sg_miter_start(smi, mrq->data->sg, mrq->data->sg_len, SG_MITER_FROM_SG);

        /* Auto inc from offset zero, dbuf zero */
        writeq(0x10000ull, host->base + MIO_EMM_BUF_IDX(host));

        for (bytes_xfered = 0; bytes_xfered < data_len;) {
                if (smi->consumed >= smi->length) {
                        if (!sg_miter_next(smi))
                                break;
                        smi->consumed = 0;
                }

                while (smi->consumed < smi->length && shift >= 0) {
                        dat |= (u64)((u8 *)smi->addr)[smi->consumed] << shift;
                        bytes_xfered++;
                        smi->consumed++;
                        shift -= 8;
                }

                if (shift < 0) {
                        writeq(dat, host->base + MIO_EMM_BUF_DAT(host));
                        shift = 56;
                        dat = 0;
                }
        }
        sg_miter_stop(smi);
}

static void cvm_mmc_request(struct mmc_host *mmc, struct mmc_request *mrq)
{
        struct cvm_mmc_slot *slot = mmc_priv(mmc);
        struct cvm_mmc_host *host = slot->host;
        struct mmc_command *cmd = mrq->cmd;
        struct cvm_mmc_cr_mods mods;
        u64 emm_cmd, rsp_sts;
        int retries = 100;

        /*
         * Note about locking:
         * All MMC devices share the same bus and controller. Allow only a
         * single user of the bootbus/MMC bus at a time. The lock is acquired
         * on all entry points from the MMC layer.
         *
         * For requests the lock is only released after the completion
         * interrupt!
         */
        host->acquire_bus(host);

        if (cmd->opcode == MMC_READ_MULTIPLE_BLOCK ||
            cmd->opcode == MMC_WRITE_MULTIPLE_BLOCK)
                return cvm_mmc_dma_request(mmc, mrq);

        cvm_mmc_switch_to(slot);

        mods = cvm_mmc_get_cr_mods(cmd);

        WARN_ON(host->current_req);
        host->current_req = mrq;

        if (cmd->data) {
                if (cmd->data->flags & MMC_DATA_READ)
                        do_read_request(host, mrq);
                else
                        do_write_request(host, mrq);

                if (cmd->data->timeout_ns)
                        set_wdog(slot, cmd->data->timeout_ns);
        } else
                set_wdog(slot, 0);

        host->dma_active = false;
        host->int_enable(host, MIO_EMM_INT_CMD_DONE | MIO_EMM_INT_CMD_ERR);

        emm_cmd = FIELD_PREP(MIO_EMM_CMD_VAL, 1) |
                  FIELD_PREP(MIO_EMM_CMD_CTYPE_XOR, mods.ctype_xor) |
                  FIELD_PREP(MIO_EMM_CMD_RTYPE_XOR, mods.rtype_xor) |
                  FIELD_PREP(MIO_EMM_CMD_IDX, cmd->opcode) |
                  FIELD_PREP(MIO_EMM_CMD_ARG, cmd->arg);
        set_bus_id(&emm_cmd, slot->bus_id);
        if (cmd->data && mmc_cmd_type(cmd) == MMC_CMD_ADTC)
                emm_cmd |= FIELD_PREP(MIO_EMM_CMD_OFFSET,
                                64 - ((cmd->data->blocks * cmd->data->blksz) / 8));
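        /*
         * Illustrative arithmetic: CMD_OFFSET is expressed in 64-bit
         * words of the 512-byte internal buffer. A full 512-byte block
         * gives 64 - 512 / 8 = 0, while an 8-byte transfer (e.g. an SD
         * SCR read) gives 64 - 1 = 63.
         */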
        writeq(0, host->base + MIO_EMM_STS_MASK(host));

retry:
        rsp_sts = readq(host->base + MIO_EMM_RSP_STS(host));
        if (rsp_sts & MIO_EMM_RSP_STS_DMA_VAL ||
            rsp_sts & MIO_EMM_RSP_STS_CMD_VAL ||
            rsp_sts & MIO_EMM_RSP_STS_SWITCH_VAL ||
            rsp_sts & MIO_EMM_RSP_STS_DMA_PEND) {
                udelay(10);
                if (--retries)
                        goto retry;
        }
        if (!retries)
                dev_err(host->dev, "Bad status: %llx before command write\n",
                        rsp_sts);
        writeq(emm_cmd, host->base + MIO_EMM_CMD(host));
}

static void cvm_mmc_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
{
        struct cvm_mmc_slot *slot = mmc_priv(mmc);
        struct cvm_mmc_host *host = slot->host;
        int clk_period = 0, power_class = 10, bus_width = 0;
        u64 clock, emm_switch;

        host->acquire_bus(host);
        cvm_mmc_switch_to(slot);

        /* Set the power state */
        switch (ios->power_mode) {
        case MMC_POWER_ON:
                break;

        case MMC_POWER_OFF:
                cvm_mmc_reset_bus(slot);
                if (host->global_pwr_gpiod)
                        host->set_shared_power(host, 0);
                else if (!IS_ERR(mmc->supply.vmmc))
                        mmc_regulator_set_ocr(mmc, mmc->supply.vmmc, 0);
                break;

        case MMC_POWER_UP:
                if (host->global_pwr_gpiod)
                        host->set_shared_power(host, 1);
                else if (!IS_ERR(mmc->supply.vmmc))
                        mmc_regulator_set_ocr(mmc, mmc->supply.vmmc, ios->vdd);
                break;
        }

        /* Convert bus width to HW definition */
        switch (ios->bus_width) {
        case MMC_BUS_WIDTH_8:
                bus_width = 2;
                break;
        case MMC_BUS_WIDTH_4:
                bus_width = 1;
                break;
        case MMC_BUS_WIDTH_1:
                bus_width = 0;
                break;
        }

        /* DDR is available for 4/8 bit bus width */
        if (ios->bus_width && ios->timing == MMC_TIMING_MMC_DDR52)
                bus_width |= 4;

        /* Change the clock frequency. */
        clock = ios->clock;
        if (clock > 52000000)
                clock = 52000000;
        slot->clock = clock;

        if (clock)
                clk_period = (host->sys_freq + clock - 1) / (2 * clock);
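        /*
         * Worked example (illustrative): with sys_freq = 800 MHz and a
         * requested 52 MHz card clock, clk_period = (800000000 + 52000000
         * - 1) / (2 * 52000000) = 8 sys-clock ticks per half period,
         * giving an actual card clock of 800 MHz / (2 * 8) = 50 MHz,
         * never above the requested rate.
         */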
        emm_switch = FIELD_PREP(MIO_EMM_SWITCH_HS_TIMING,
                                (ios->timing == MMC_TIMING_MMC_HS)) |
                     FIELD_PREP(MIO_EMM_SWITCH_BUS_WIDTH, bus_width) |
                     FIELD_PREP(MIO_EMM_SWITCH_POWER_CLASS, power_class) |
                     FIELD_PREP(MIO_EMM_SWITCH_CLK_HI, clk_period) |
                     FIELD_PREP(MIO_EMM_SWITCH_CLK_LO, clk_period);
        set_bus_id(&emm_switch, slot->bus_id);

        if (!switch_val_changed(slot, emm_switch))
                goto out;

        set_wdog(slot, 0);
        do_switch(host, emm_switch);
        slot->cached_switch = emm_switch;
out:
        host->release_bus(host);
}

static const struct mmc_host_ops cvm_mmc_ops = {
        .request = cvm_mmc_request,
        .set_ios = cvm_mmc_set_ios,
        .get_ro = mmc_gpio_get_ro,
        .get_cd = mmc_gpio_get_cd,
};

static void cvm_mmc_set_clock(struct cvm_mmc_slot *slot, unsigned int clock)
{
        struct mmc_host *mmc = slot->mmc;

        clock = min(clock, mmc->f_max);
        clock = max(clock, mmc->f_min);
        slot->clock = clock;
}

static int cvm_mmc_init_lowlevel(struct cvm_mmc_slot *slot)
{
        struct cvm_mmc_host *host = slot->host;
        u64 emm_switch;

        /* Enable this bus slot. */
        host->emm_cfg |= (1ull << slot->bus_id);
        writeq(host->emm_cfg, slot->host->base + MIO_EMM_CFG(host));
        udelay(10);

        /* Program initial clock speed and power. */
        cvm_mmc_set_clock(slot, slot->mmc->f_min);
        emm_switch = FIELD_PREP(MIO_EMM_SWITCH_POWER_CLASS, 10);
        emm_switch |= FIELD_PREP(MIO_EMM_SWITCH_CLK_HI,
                                 (host->sys_freq / slot->clock) / 2);
        emm_switch |= FIELD_PREP(MIO_EMM_SWITCH_CLK_LO,
                                 (host->sys_freq / slot->clock) / 2);
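        /*
         * Worked example (illustrative): with sys_freq = 800 MHz and the
         * initial 400 kHz identification clock, CLK_HI = CLK_LO =
         * (800000000 / 400000) / 2 = 1000 sys-clock ticks per half
         * period, i.e. a 50% duty cycle at 400 kHz.
         */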
        /* Make the changes take effect on this bus slot. */
        set_bus_id(&emm_switch, slot->bus_id);
        do_switch(host, emm_switch);

        slot->cached_switch = emm_switch;

        /*
         * Set the watchdog timeout value and the default reset value
         * for the mask register. Finally, set the CARD_RCA
         * bit so that we can get the card address relative
         * to the CMD register for CMD7 transactions.
         */
        set_wdog(slot, 0);
        writeq(0xe4390080ull, host->base + MIO_EMM_STS_MASK(host));
        writeq(1, host->base + MIO_EMM_RCA(host));
        return 0;
}

static int cvm_mmc_of_parse(struct device *dev, struct cvm_mmc_slot *slot)
{
        u32 id, cmd_skew = 0, dat_skew = 0, bus_width = 0;
        struct device_node *node = dev->of_node;
        struct mmc_host *mmc = slot->mmc;
        u64 clock_period;
        int ret;

        ret = of_property_read_u32(node, "reg", &id);
        if (ret) {
                dev_err(dev, "Missing or invalid reg property on %pOF\n", node);
                return ret;
        }

        if (id >= CAVIUM_MAX_MMC || slot->host->slot[id]) {
                dev_err(dev, "Invalid reg property on %pOF\n", node);
                return -EINVAL;
        }

        ret = mmc_regulator_get_supply(mmc);
        if (ret)
                return ret;
        /*
         * Legacy Octeon firmware has no regulator entry, fall back to
         * a hard-coded voltage to get a sane OCR.
         */
        if (IS_ERR(mmc->supply.vmmc))
                mmc->ocr_avail = MMC_VDD_32_33 | MMC_VDD_33_34;

        /* Common MMC bindings */
        ret = mmc_of_parse(mmc);
        if (ret)
                return ret;

        /* Set bus width */
        if (!(mmc->caps & (MMC_CAP_8_BIT_DATA | MMC_CAP_4_BIT_DATA))) {
                of_property_read_u32(node, "cavium,bus-max-width", &bus_width);
                if (bus_width == 8)
                        mmc->caps |= MMC_CAP_8_BIT_DATA | MMC_CAP_4_BIT_DATA;
                else if (bus_width == 4)
                        mmc->caps |= MMC_CAP_4_BIT_DATA;
        }

        /* Set maximum and minimum frequency */
        if (!mmc->f_max)
                of_property_read_u32(node, "spi-max-frequency", &mmc->f_max);
        if (!mmc->f_max || mmc->f_max > 52000000)
                mmc->f_max = 52000000;
        mmc->f_min = 400000;

        /* Sampling register settings, period in picoseconds */
        clock_period = 1000000000000ull / slot->host->sys_freq;
        of_property_read_u32(node, "cavium,cmd-clk-skew", &cmd_skew);
        of_property_read_u32(node, "cavium,dat-clk-skew", &dat_skew);
        slot->cmd_cnt = (cmd_skew + clock_period / 2) / clock_period;
        slot->dat_cnt = (dat_skew + clock_period / 2) / clock_period;

        return id;
}

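/*
 * Worked example (illustrative): at sys_freq = 700 MHz the sample-tap
 * period is 1000000000000 / 700000000 = 1428 ps. A requested
 * cavium,dat-clk-skew of 1600 ps then rounds to
 * dat_cnt = (1600 + 714) / 1428 = 1 tap.
 */
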
int cvm_mmc_of_slot_probe(struct device *dev, struct cvm_mmc_host *host)
{
        struct cvm_mmc_slot *slot;
        struct mmc_host *mmc;
        int ret, id;

        mmc = mmc_alloc_host(sizeof(struct cvm_mmc_slot), dev);
        if (!mmc)
                return -ENOMEM;

        slot = mmc_priv(mmc);
        slot->mmc = mmc;
        slot->host = host;

        ret = cvm_mmc_of_parse(dev, slot);
        if (ret < 0)
                goto error;
        id = ret;

        /* Set up host parameters */
        mmc->ops = &cvm_mmc_ops;

        /*
         * Since we only have a 3.3V supply, we cannot support any
         * of the UHS modes. We do support the high-speed DDR
         * modes up to 52MHz.
         *
         * Disable bounce buffers for max_segs = 1.
         */
        mmc->caps |= MMC_CAP_MMC_HIGHSPEED | MMC_CAP_SD_HIGHSPEED |
                     MMC_CAP_CMD23 | MMC_CAP_POWER_OFF_CARD | MMC_CAP_3_3V_DDR;

        if (host->use_sg)
                mmc->max_segs = 16;
        else
                mmc->max_segs = 1;

        /* DMA size field can address up to 8 MB */
        mmc->max_seg_size = min_t(unsigned int, 8 * 1024 * 1024,
                                  dma_get_max_seg_size(host->dev));
        mmc->max_req_size = mmc->max_seg_size;
        /* External DMA is in 512 byte blocks */
        mmc->max_blk_size = 512;
        /* DMA block count field is 15 bits */
        mmc->max_blk_count = 32767;
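        /*
         * Illustrative note: the effective per-request ceiling is
         * max_req_size (at most 8 MB), since max_blk_count alone would
         * allow 32767 * 512 bytes, a little under 16 MB.
         */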
        slot->clock = mmc->f_min;
        slot->bus_id = id;
        slot->cached_rca = 1;

        host->acquire_bus(host);
        host->slot[id] = slot;
        cvm_mmc_switch_to(slot);
        cvm_mmc_init_lowlevel(slot);
        host->release_bus(host);

        ret = mmc_add_host(mmc);
        if (ret) {
                dev_err(dev, "mmc_add_host() returned %d\n", ret);
                slot->host->slot[id] = NULL;
                goto error;
        }
        return 0;

error:
        mmc_free_host(slot->mmc);
        return ret;
}

int cvm_mmc_of_slot_remove(struct cvm_mmc_slot *slot)
{
        mmc_remove_host(slot->mmc);
        slot->host->slot[slot->bus_id] = NULL;
        mmc_free_host(slot->mmc);
        return 0;
}