2 * QEMU PowerPC 4xx embedded processors shared devices emulation
4 * Copyright (c) 2007 Jocelyn Mayer
6 * Permission is hereby granted, free of charge, to any person obtaining a copy
7 * of this software and associated documentation files (the "Software"), to deal
8 * in the Software without restriction, including without limitation the rights
9 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
10 * copies of the Software, and to permit persons to whom the Software is
11 * furnished to do so, subject to the following conditions:
13 * The above copyright notice and this permission notice shall be included in
14 * all copies or substantial portions of the Software.
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
19 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
20 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
21 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
31 //#define DEBUG_UNASSIGNED
/* LOG_UIC(): trace helper for the "universal" interrupt controller.
 * When UIC debugging is compiled in it routes through qemu_log_mask()
 * with the CPU_LOG_INT mask; otherwise it expands to a no-op
 * do { } while (0).  (The #if/#else/#endif guards are elided from this
 * excerpt.) */
36 # define LOG_UIC(...) qemu_log_mask(CPU_LOG_INT, ## __VA_ARGS__)
38 # define LOG_UIC(...) do { } while (0)
41 /*****************************************************************************/
42 /* Generic PowerPC 4xx processor instantiation */
/* Instantiate a generic PowerPC 4xx embedded CPU.
 * Looks up the CPU model, hooks up the CPU and time-base clock
 * descriptors, initializes the embedded timers at 'sysclk' and the DCR
 * bus, and registers a CPU reset callback.  Returns the new CPUState.
 * NOTE(review): the error path after a failed cpu_init() is not visible
 * in this excerpt — presumably it prints the message below and exits;
 * confirm against the full file. */
43 CPUState *ppc4xx_init (const char *cpu_model,
44 clk_setup_t *cpu_clk, clk_setup_t *tb_clk,
50 env = cpu_init(cpu_model);
52 fprintf(stderr, "Unable to find PowerPC %s CPU definition\n",
/* CPU clock frequency changes are ignored: no callback installed. */
56 cpu_clk->cb = NULL; /* We don't care about CPU clock frequency changes */
57 cpu_clk->opaque = env;
58 /* Set time-base frequency to sysclk */
59 tb_clk->cb = ppc_emb_timers_init(env, sysclk);
61 ppc_dcr_init(env, NULL, NULL);
62 /* Register qemu callbacks */
63 qemu_register_reset(&cpu_ppc_reset, env);
68 /*****************************************************************************/
69 /* Fake device used to map multiple devices in a single memory page */
/* The page is split into MMIO_AREA_NB sub-areas of MMIO_AREA_LEN (256)
 * bytes each; MMIO_IDX() maps an offset within the page to its sub-area
 * index.  Each sub-area has its own read/write handler tables and
 * opaque pointer. */
70 #define MMIO_AREA_BITS 8
71 #define MMIO_AREA_LEN (1 << MMIO_AREA_BITS)
72 #define MMIO_AREA_NB (1 << (TARGET_PAGE_BITS - MMIO_AREA_BITS))
73 #define MMIO_IDX(addr) (((addr) >> MMIO_AREA_BITS) & (MMIO_AREA_NB - 1))
74 struct ppc4xx_mmio_t {
75 target_phys_addr_t base;
/* Per-sub-area handler tables: each entry points to an array of
 * read/write functions indexed by access size (0=byte, 1=word, 2=long). */
76 CPUReadMemoryFunc **mem_read[MMIO_AREA_NB];
77 CPUWriteMemoryFunc **mem_write[MMIO_AREA_NB];
78 void *opaque[MMIO_AREA_NB];
/* Default handlers for sub-areas no device has claimed: reads return a
 * default value (not visible in this excerpt) and writes are discarded;
 * both optionally log the access when DEBUG_UNASSIGNED is defined. */
81 static uint32_t unassigned_mmio_readb (void *opaque, target_phys_addr_t addr)
83 #ifdef DEBUG_UNASSIGNED
87 printf("Unassigned mmio read 0x" PADDRX " base " PADDRX "\n",
94 static void unassigned_mmio_writeb (void *opaque,
95 target_phys_addr_t addr, uint32_t val)
97 #ifdef DEBUG_UNASSIGNED
101 printf("Unassigned mmio write 0x" PADDRX " = 0x%x base " PADDRX "\n",
102 addr, val, mmio->base);
/* Handler tables for unassigned sub-areas: the same byte handler is
 * reused for all three access sizes (byte/word/long). */
106 static CPUReadMemoryFunc *unassigned_mmio_read[3] = {
107 unassigned_mmio_readb,
108 unassigned_mmio_readb,
109 unassigned_mmio_readb,
112 static CPUWriteMemoryFunc *unassigned_mmio_write[3] = {
113 unassigned_mmio_writeb,
114 unassigned_mmio_writeb,
115 unassigned_mmio_writeb,
/* Dispatch an MMIO read of size 'len' (0=byte, 1=word, 2=long) to the
 * handler registered for the sub-area containing 'addr'. */
118 static uint32_t mmio_readlen (ppc4xx_mmio_t *mmio,
119 target_phys_addr_t addr, int len)
121 CPUReadMemoryFunc **mem_read;
125 idx = MMIO_IDX(addr);
126 #if defined(DEBUG_MMIO)
127 printf("%s: mmio %p len %d addr " PADDRX " idx %d\n", __func__,
128 mmio, len, addr, idx);
/* Select the size-indexed handler for this sub-area and invoke it with
 * the sub-area's opaque pointer. */
130 mem_read = mmio->mem_read[idx];
131 ret = (*mem_read[len])(mmio->opaque[idx], addr);
/* Dispatch an MMIO write of size 'len' (0=byte, 1=word, 2=long) to the
 * handler registered for the sub-area containing 'addr'. */
136 static void mmio_writelen (ppc4xx_mmio_t *mmio,
137 target_phys_addr_t addr, uint32_t value, int len)
139 CPUWriteMemoryFunc **mem_write;
142 idx = MMIO_IDX(addr);
143 #if defined(DEBUG_MMIO)
144 printf("%s: mmio %p len %d addr " PADDRX " idx %d value %08" PRIx32 "\n",
145 __func__, mmio, len, addr, idx, value);
147 mem_write = mmio->mem_write[idx];
148 (*mem_write[len])(mmio->opaque[idx], addr, value);
/* Byte-sized MMIO read: thin wrapper forwarding to mmio_readlen(len=0). */
151 static uint32_t mmio_readb (void *opaque, target_phys_addr_t addr)
153 #if defined(DEBUG_MMIO)
154 printf("%s: addr " PADDRX "\n", __func__, addr);
157 return mmio_readlen(opaque, addr, 0);
/* Byte-sized MMIO write: thin wrapper forwarding to mmio_writelen(len=0). */
160 static void mmio_writeb (void *opaque,
161 target_phys_addr_t addr, uint32_t value)
163 #if defined(DEBUG_MMIO)
164 printf("%s: addr " PADDRX " val %08" PRIx32 "\n", __func__, addr, value);
166 mmio_writelen(opaque, addr, value, 0);
/* 16-bit MMIO read: thin wrapper forwarding to mmio_readlen(len=1). */
169 static uint32_t mmio_readw (void *opaque, target_phys_addr_t addr)
171 #if defined(DEBUG_MMIO)
172 printf("%s: addr " PADDRX "\n", __func__, addr);
175 return mmio_readlen(opaque, addr, 1);
/* 16-bit MMIO write: thin wrapper forwarding to mmio_writelen(len=1). */
178 static void mmio_writew (void *opaque,
179 target_phys_addr_t addr, uint32_t value)
181 #if defined(DEBUG_MMIO)
182 printf("%s: addr " PADDRX " val %08" PRIx32 "\n", __func__, addr, value);
184 mmio_writelen(opaque, addr, value, 1);
/* 32-bit MMIO read: thin wrapper forwarding to mmio_readlen(len=2). */
187 static uint32_t mmio_readl (void *opaque, target_phys_addr_t addr)
189 #if defined(DEBUG_MMIO)
190 printf("%s: addr " PADDRX "\n", __func__, addr);
193 return mmio_readlen(opaque, addr, 2);
/* 32-bit MMIO write: thin wrapper forwarding to mmio_writelen(len=2). */
196 static void mmio_writel (void *opaque,
197 target_phys_addr_t addr, uint32_t value)
199 #if defined(DEBUG_MMIO)
200 printf("%s: addr " PADDRX " val %08" PRIx32 "\n", __func__, addr, value);
202 mmio_writelen(opaque, addr, value, 2);
/* Top-level size-indexed handler tables registered with QEMU's I/O
 * memory layer (entries elided from this excerpt — presumably
 * mmio_readb/w/l and mmio_writeb/w/l). */
205 static CPUReadMemoryFunc *mmio_read[] = {
211 static CPUWriteMemoryFunc *mmio_write[] = {
/* Claim the sub-areas covering [offset, offset+len) inside the shared
 * MMIO page and install the given read/write handler tables and opaque
 * pointer for each.  Fails (return path not visible here) when the
 * range would extend past the page. */
217 int ppc4xx_mmio_register (CPUState *env, ppc4xx_mmio_t *mmio,
218 target_phys_addr_t offset, uint32_t len,
219 CPUReadMemoryFunc **mem_read,
220 CPUWriteMemoryFunc **mem_write, void *opaque)
222 target_phys_addr_t end;
225 if ((offset + len) > TARGET_PAGE_SIZE)
/* idx..eidx is the inclusive range of sub-area slots spanned. */
227 idx = MMIO_IDX(offset);
228 end = offset + len - 1;
229 eidx = MMIO_IDX(end);
230 #if defined(DEBUG_MMIO)
231 printf("%s: offset " PADDRX " len %08" PRIx32 " " PADDRX " %d %d\n",
232 __func__, offset, len, end, idx, eidx);
234 for (; idx <= eidx; idx++) {
235 mmio->mem_read[idx] = mem_read;
236 mmio->mem_write[idx] = mem_write;
237 mmio->opaque[idx] = opaque;
/* Allocate and map one shared-MMIO page at physical address 'base'.
 * Registers the page with QEMU's I/O memory layer and pre-fills every
 * sub-area with the "unassigned" handlers so unclaimed accesses are
 * caught.  Returns the new ppc4xx_mmio_t. */
243 ppc4xx_mmio_t *ppc4xx_mmio_init (CPUState *env, target_phys_addr_t base)
248 mmio = qemu_mallocz(sizeof(ppc4xx_mmio_t));
250 mmio_memory = cpu_register_io_memory(0, mmio_read, mmio_write, mmio);
251 #if defined(DEBUG_MMIO)
252 printf("%s: base " PADDRX " len %08x %d\n", __func__,
253 base, TARGET_PAGE_SIZE, mmio_memory);
255 cpu_register_physical_memory(base, TARGET_PAGE_SIZE, mmio_memory);
256 ppc4xx_mmio_register(env, mmio, 0, TARGET_PAGE_SIZE,
257 unassigned_mmio_read, unassigned_mmio_write,
263 /*****************************************************************************/
264 /* "Universal" Interrupt controller */
/* Per-UIC state: up to 32 IRQ inputs, one bit per IRQ in each register. */
278 #define UIC_MAX_IRQ 32
279 typedef struct ppcuic_t ppcuic_t;
283 uint32_t level; /* Remembers the state of level-triggered interrupts. */
284 uint32_t uicsr; /* Status register */
285 uint32_t uicer; /* Enable register */
286 uint32_t uiccr; /* Critical register */
287 uint32_t uicpr; /* Polarity register */
288 uint32_t uictr; /* Triggering register */
289 uint32_t uicvcr; /* Vector configuration register */
/* Re-evaluate the UIC outputs after any state change: raise or lower
 * the normal (INT) and critical (CINT) output lines according to the
 * pending-and-enabled status bits, and recompute the critical interrupt
 * vector when vector generation is in use. */
294 static void ppcuic_trigger_irq (ppcuic_t *uic)
297 int start, end, inc, i;
299 /* Trigger interrupt if any is pending */
/* ir = pending non-critical IRQs; cr = pending critical IRQs. */
300 ir = uic->uicsr & uic->uicer & (~uic->uiccr);
301 cr = uic->uicsr & uic->uicer & uic->uiccr;
302 LOG_UIC("%s: uicsr %08" PRIx32 " uicer %08" PRIx32
303 " uiccr %08" PRIx32 "\n"
304 " %08" PRIx32 " ir %08" PRIx32 " cr %08" PRIx32 "\n",
305 __func__, uic->uicsr, uic->uicer, uic->uiccr,
306 uic->uicsr & uic->uicer, ir, cr);
/* NOTE(review): 0x0000000 below has only 7 zeros — harmless (still 0)
 * but inconsistent with the 8-digit constants used elsewhere. */
307 if (ir != 0x0000000) {
308 LOG_UIC("Raise UIC interrupt\n");
309 qemu_irq_raise(uic->irqs[PPCUIC_OUTPUT_INT]);
311 LOG_UIC("Lower UIC interrupt\n");
312 qemu_irq_lower(uic->irqs[PPCUIC_OUTPUT_INT]);
314 /* Trigger critical interrupt if any is pending and update vector */
315 if (cr != 0x0000000) {
316 qemu_irq_raise(uic->irqs[PPCUIC_OUTPUT_CINT]);
317 if (uic->use_vectors) {
318 /* Compute critical IRQ vector */
/* Bit 0 of UICVCR selects the scan direction (priority ordering);
 * the scan bounds set-up is elided from this excerpt. */
319 if (uic->uicvcr & 1) {
/* Vector base is UICVCR with the low two bits masked; each IRQ
 * slot is 512 bytes apart. */
328 uic->uicvr = uic->uicvcr & 0xFFFFFFFC;
329 for (i = start; i <= end; i += inc) {
331 uic->uicvr += (i - start) * 512 * inc;
336 LOG_UIC("Raise UIC critical interrupt - "
337 "vector %08" PRIx32 "\n", uic->uicvr);
339 LOG_UIC("Lower UIC critical interrupt\n");
340 qemu_irq_lower(uic->irqs[PPCUIC_OUTPUT_CINT]);
341 uic->uicvr = 0x00000000;
/* qemu_irq handler for the UIC inputs: update the status register for
 * input 'irq_num' according to its edge/level trigger configuration,
 * then re-evaluate the outputs if the status changed. */
345 static void ppcuic_set_irq (void *opaque, int irq_num, int level)
/* IRQ 0 is the MSB of the registers, hence the 31-irq_num shift. */
351 mask = 1 << (31-irq_num);
352 LOG_UIC("%s: irq %d level %d uicsr %08" PRIx32
353 " mask %08" PRIx32 " => %08" PRIx32 " %08" PRIx32 "\n",
354 __func__, irq_num, level,
/* NOTE(review): the log shifts by irq_num while the mask shifts by
 * 31-irq_num — the logged "=> " value is inconsistent with the mask
 * actually applied.  Log-only; does not affect behavior. */
355 uic->uicsr, mask, uic->uicsr & mask, level << irq_num);
/* NOTE(review): this range check happens AFTER 'mask' is computed, so
 * an out-of-range irq_num already performed a shift by a negative or
 * >=32 amount (undefined behavior in C).  The check should precede the
 * shift — fixing requires the elided function body; flagged only. */
356 if (irq_num < 0 || irq_num > 31)
360 /* Update status register */
361 if (uic->uictr & mask) {
362 /* Edge sensitive interrupt */
366 /* Level sensitive interrupt */
375 LOG_UIC("%s: irq %d level %d sr %" PRIx32 " => "
376 "%08" PRIx32 "\n", __func__, irq_num, level, uic->uicsr, sr);
/* Only re-run output evaluation when the status actually changed. */
377 if (sr != uic->uicsr)
378 ppcuic_trigger_irq(uic);
/* DCR-bus read handler for the UIC registers.  'dcrn' is rebased to an
 * offset from the controller's DCR base; the switch over register
 * offsets is largely elided from this excerpt.  Vector registers read
 * specially when vector generation is disabled. */
381 static target_ulong dcr_read_uic (void *opaque, int dcrn)
387 dcrn -= uic->dcr_base;
/* UICMSR: masked status = pending AND enabled. */
406 ret = uic->uicsr & uic->uicer;
409 if (!uic->use_vectors)
414 if (!uic->use_vectors)
/* DCR-bus write handler for the UIC registers.  Each register write
 * that can change pending/enabled/critical state re-evaluates the
 * outputs via ppcuic_trigger_irq().  Most case labels are elided from
 * this excerpt. */
427 static void dcr_write_uic (void *opaque, int dcrn, target_ulong val)
432 dcrn -= uic->dcr_base;
433 LOG_UIC("%s: dcr %d val " ADDRX "\n", __func__, dcrn, val);
/* Writing UICSR: re-assert the remembered level-triggered sources so a
 * write-one-to-clear cannot drop a still-asserted level interrupt. */
437 uic->uicsr |= uic->level;
438 ppcuic_trigger_irq(uic);
442 ppcuic_trigger_irq(uic);
446 ppcuic_trigger_irq(uic);
450 ppcuic_trigger_irq(uic);
457 ppcuic_trigger_irq(uic);
/* UICVCR: bit 1 is reserved and masked off on write. */
464 uic->uicvcr = val & 0xFFFFFFFD;
465 ppcuic_trigger_irq(uic);
/* Reset callback: clear all UIC registers; the vector registers are
 * only cleared when vector generation is in use. */
470 static void ppcuic_reset (void *opaque)
475 uic->uiccr = 0x00000000;
476 uic->uicer = 0x00000000;
477 uic->uicpr = 0x00000000;
478 uic->uicsr = 0x00000000;
479 uic->uictr = 0x00000000;
480 if (uic->use_vectors) {
481 uic->uicvcr = 0x00000000;
/* NOTE(review): 0x0000000 has 7 digits — still zero, cosmetic only. */
482 uic->uicvr = 0x0000000;
/* Create a UIC instance: allocate state, register its DCR range with
 * the CPU, hook the reset callback, and hand back the array of
 * UIC_MAX_IRQ input qemu_irq lines.  'irqs' are the CPU-side output
 * lines (INT/CINT); has_ssr/has_vr select optional register features
 * (the has_vr handling that sets use_vectors is partly elided). */
486 qemu_irq *ppcuic_init (CPUState *env, qemu_irq *irqs,
487 uint32_t dcr_base, int has_ssr, int has_vr)
492 uic = qemu_mallocz(sizeof(ppcuic_t));
493 uic->dcr_base = dcr_base;
496 uic->use_vectors = 1;
497 for (i = 0; i < DCR_UICMAX; i++) {
498 ppc_dcr_register(env, dcr_base + i, uic,
499 &dcr_read_uic, &dcr_write_uic);
501 qemu_register_reset(ppcuic_reset, uic);
504 return qemu_allocate_irqs(&ppcuic_set_irq, uic, UIC_MAX_IRQ);
507 /*****************************************************************************/
508 /* SDRAM controller */
/* Controller state: up to 4 RAM banks, each with a base and size; the
 * register fields of the struct are elided from this excerpt.  The two
 * DCRs below form the indirect address/data pair used to access the
 * controller's internal registers. */
509 typedef struct ppc4xx_sdram_t ppc4xx_sdram_t;
510 struct ppc4xx_sdram_t {
513 target_phys_addr_t ram_bases[4];
514 target_phys_addr_t ram_sizes[4];
530 SDRAM0_CFGADDR = 0x010,
531 SDRAM0_CFGDATA = 0x011,
534 /* XXX: TOFIX: some patches have made this code become inconsistent:
535  * there are type inconsistencies, mixing target_phys_addr_t, target_ulong
/* Build a bank configuration register (BCR) value for a bank at
 * 'ram_base' of 'ram_size' bytes.  The size field encodings for the
 * supported power-of-two sizes (4 MiB .. 256 MiB) are elided from this
 * excerpt; unsupported sizes are reported on stdout.  The base address
 * occupies the top 9 bits (0xFF800000 mask). */
538 static uint32_t sdram_bcr (target_phys_addr_t ram_base,
539 target_phys_addr_t ram_size)
544 case (4 * 1024 * 1024):
547 case (8 * 1024 * 1024):
550 case (16 * 1024 * 1024):
553 case (32 * 1024 * 1024):
556 case (64 * 1024 * 1024):
559 case (128 * 1024 * 1024):
562 case (256 * 1024 * 1024):
566 printf("%s: invalid RAM size " PADDRX "\n", __func__, ram_size);
569 bcr |= ram_base & 0xFF800000;
575 static always_inline target_phys_addr_t sdram_base (uint32_t bcr)
577 return bcr & 0xFF800000;
/* Decode the bank size from a BCR: a 3-bit field at bits 19..17
 * selects 4 MiB << sh.  A special-case branch (elided from this
 * excerpt — presumably for the reserved encoding) precedes the shift. */
580 static target_ulong sdram_size (uint32_t bcr)
585 sh = (bcr >> 17) & 0x7;
589 size = (4 * 1024 * 1024) << sh;
/* Update a bank configuration register in place.  If the bank was
 * previously enabled (bit 0 of *bcrp), its old physical mapping is
 * unmapped first; then the new value is stored (reserved bits masked)
 * and, when the controller is enabled and the bank enable bit is set,
 * the bank is mapped as RAM at its decoded base address. */
594 static void sdram_set_bcr (uint32_t *bcrp, uint32_t bcr, int enabled)
596 if (*bcrp & 0x00000001) {
599 printf("%s: unmap RAM area " PADDRX " " ADDRX "\n",
600 __func__, sdram_base(*bcrp), sdram_size(*bcrp));
/* Unmap old region (the IO_MEM_UNASSIGNED argument is elided here). */
602 cpu_register_physical_memory(sdram_base(*bcrp), sdram_size(*bcrp),
/* Mask off reserved BCR bits before storing. */
605 *bcrp = bcr & 0xFFDEE001;
606 if (enabled && (bcr & 0x00000001)) {
608 printf("%s: Map RAM area " PADDRX " " ADDRX "\n",
609 __func__, sdram_base(bcr), sdram_size(bcr));
/* Map the bank: RAM offset equals the bank's physical base here. */
611 cpu_register_physical_memory(sdram_base(bcr), sdram_size(bcr),
612 sdram_base(bcr) | IO_MEM_RAM);
/* (Re)map all configured banks: banks with a non-zero size get a BCR
 * built from their base/size and are enabled; the rest are disabled. */
616 static void sdram_map_bcr (ppc4xx_sdram_t *sdram)
620 for (i = 0; i < sdram->nbanks; i++) {
621 if (sdram->ram_sizes[i] != 0) {
622 sdram_set_bcr(&sdram->bcr[i],
623 sdram_bcr(sdram->ram_bases[i], sdram->ram_sizes[i]),
626 sdram_set_bcr(&sdram->bcr[i], 0x00000000, 0);
/* Remove the physical mapping of every bank (used when the controller
 * is disabled); the BCR contents themselves are left untouched. */
631 static void sdram_unmap_bcr (ppc4xx_sdram_t *sdram)
635 for (i = 0; i < sdram->nbanks; i++) {
637 printf("%s: Unmap RAM area " PADDRX " " ADDRX "\n",
638 __func__, sdram_base(sdram->bcr[i]), sdram_size(sdram->bcr[i]));
640 cpu_register_physical_memory(sdram_base(sdram->bcr[i]),
641 sdram_size(sdram->bcr[i]),
/* DCR read handler for the SDRAM controller.  Access is indirect: the
 * CFGADDR DCR selects an internal register (sdram->addr) and CFGDATA
 * reads dispatch on it via the switch below.  The individual return
 * statements are elided from this excerpt. */
646 static target_ulong dcr_read_sdram (void *opaque, int dcrn)
648 ppc4xx_sdram_t *sdram;
657 switch (sdram->addr) {
658 case 0x00: /* SDRAM_BESR0 */
661 case 0x08: /* SDRAM_BESR1 */
664 case 0x10: /* SDRAM_BEAR */
667 case 0x20: /* SDRAM_CFG */
670 case 0x24: /* SDRAM_STATUS */
673 case 0x30: /* SDRAM_RTR */
676 case 0x34: /* SDRAM_PMIT */
679 case 0x40: /* SDRAM_B0CR */
682 case 0x44: /* SDRAM_B1CR */
685 case 0x48: /* SDRAM_B2CR */
688 case 0x4C: /* SDRAM_B3CR */
691 case 0x80: /* SDRAM_TR */
694 case 0x94: /* SDRAM_ECCCFG */
697 case 0x98: /* SDRAM_ECCESR */
706 /* Avoid gcc warning */
/* DCR write handler for the SDRAM controller (indirect access via the
 * CFGADDR/CFGDATA pair, mirroring dcr_read_sdram).  Notable behaviors:
 * BESR0/1 are write-one-to-clear; toggling CFG bit 31 enables/disables
 * the controller (mapping/unmapping all banks); BnCR writes re-map the
 * corresponding bank; ECCESR transitions raise/lower the ECC IRQ. */
714 static void dcr_write_sdram (void *opaque, int dcrn, target_ulong val)
716 ppc4xx_sdram_t *sdram;
724 switch (sdram->addr) {
725 case 0x00: /* SDRAM_BESR0 */
/* Write-one-to-clear error status. */
726 sdram->besr0 &= ~val;
728 case 0x08: /* SDRAM_BESR1 */
729 sdram->besr1 &= ~val;
731 case 0x10: /* SDRAM_BEAR */
734 case 0x20: /* SDRAM_CFG */
/* Rising edge of bit 31: enable the controller and map all banks. */
736 if (!(sdram->cfg & 0x80000000) && (val & 0x80000000)) {
738 printf("%s: enable SDRAM controller\n", __func__);
740 /* validate all RAM mappings */
741 sdram_map_bcr(sdram);
742 sdram->status &= ~0x80000000;
/* Falling edge of bit 31: disable the controller and unmap banks. */
743 } else if ((sdram->cfg & 0x80000000) && !(val & 0x80000000)) {
745 printf("%s: disable SDRAM controller\n", __func__);
747 /* invalidate all RAM mappings */
748 sdram_unmap_bcr(sdram);
749 sdram->status |= 0x80000000;
/* Bit 30 is mirrored into the status register. */
751 if (!(sdram->cfg & 0x40000000) && (val & 0x40000000))
752 sdram->status |= 0x40000000;
753 else if ((sdram->cfg & 0x40000000) && !(val & 0x40000000))
754 sdram->status &= ~0x40000000;
757 case 0x24: /* SDRAM_STATUS */
758 /* Read-only register */
760 case 0x30: /* SDRAM_RTR */
761 sdram->rtr = val & 0x3FF80000;
763 case 0x34: /* SDRAM_PMIT */
764 sdram->pmit = (val & 0xF8000000) | 0x07C00000;
/* Bank configuration writes: map/unmap the bank according to the new
 * value; mapping only takes effect while the controller is enabled. */
766 case 0x40: /* SDRAM_B0CR */
767 sdram_set_bcr(&sdram->bcr[0], val, sdram->cfg & 0x80000000);
769 case 0x44: /* SDRAM_B1CR */
770 sdram_set_bcr(&sdram->bcr[1], val, sdram->cfg & 0x80000000);
772 case 0x48: /* SDRAM_B2CR */
773 sdram_set_bcr(&sdram->bcr[2], val, sdram->cfg & 0x80000000);
775 case 0x4C: /* SDRAM_B3CR */
776 sdram_set_bcr(&sdram->bcr[3], val, sdram->cfg & 0x80000000);
778 case 0x80: /* SDRAM_TR */
779 sdram->tr = val & 0x018FC01F;
781 case 0x94: /* SDRAM_ECCCFG */
782 sdram->ecccfg = val & 0x00F00000;
784 case 0x98: /* SDRAM_ECCESR */
/* Edge-detect on the ECC error status to drive the IRQ line. */
786 if (sdram->eccesr == 0 && val != 0)
787 qemu_irq_raise(sdram->irq);
788 else if (sdram->eccesr != 0 && val == 0)
789 qemu_irq_lower(sdram->irq);
/* Reset callback: restore all controller registers to their power-on
 * values and unmap every bank.  Note 'cfg' is first cleared and then
 * set to 0x00800000 for the pre-initialized-banks case. */
799 static void sdram_reset (void *opaque)
801 ppc4xx_sdram_t *sdram;
804 sdram->addr = 0x00000000;
805 sdram->bear = 0x00000000;
806 sdram->besr0 = 0x00000000; /* No error */
807 sdram->besr1 = 0x00000000; /* No error */
808 sdram->cfg = 0x00000000;
809 sdram->ecccfg = 0x00000000; /* No ECC */
810 sdram->eccesr = 0x00000000; /* No error */
811 sdram->pmit = 0x07C00000;
812 sdram->rtr = 0x05F00000;
813 sdram->tr = 0x00854009;
814 /* We pre-initialize RAM banks */
815 sdram->status = 0x00000000;
816 sdram->cfg = 0x00800000;
/* Banks start unmapped; guest firmware is expected to enable them. */
817 sdram_unmap_bcr(sdram);
/* Create the SDRAM controller: allocate state, copy the (up to 4) bank
 * bases/sizes, register the reset callback and the CFGADDR/CFGDATA DCR
 * pair, and (conditionally — the guard is elided from this excerpt)
 * pre-map the banks. */
820 void ppc4xx_sdram_init (CPUState *env, qemu_irq irq, int nbanks,
821 target_phys_addr_t *ram_bases,
822 target_phys_addr_t *ram_sizes,
825 ppc4xx_sdram_t *sdram;
827 sdram = qemu_mallocz(sizeof(ppc4xx_sdram_t));
829 sdram->nbanks = nbanks;
/* Zero all 4 slots first, then copy only the caller's nbanks entries. */
830 memset(sdram->ram_bases, 0, 4 * sizeof(target_phys_addr_t));
831 memcpy(sdram->ram_bases, ram_bases,
832 nbanks * sizeof(target_phys_addr_t));
833 memset(sdram->ram_sizes, 0, 4 * sizeof(target_phys_addr_t));
834 memcpy(sdram->ram_sizes, ram_sizes,
835 nbanks * sizeof(target_phys_addr_t));
837 qemu_register_reset(&sdram_reset, sdram);
838 ppc_dcr_register(env, SDRAM0_CFGADDR,
839 sdram, &dcr_read_sdram, &dcr_write_sdram);
840 ppc_dcr_register(env, SDRAM0_CFGDATA,
841 sdram, &dcr_read_sdram, &dcr_write_sdram);
843 sdram_map_bcr(sdram);
846 /* Fill in consecutive SDRAM banks with 'ram_size' bytes of memory.
848  * sdram_bank_sizes[] must be 0-terminated.
850  * The 4xx SDRAM controller supports a small number of banks, and each bank
851  * must be one of a small set of sizes. The number of banks and the supported
852  * sizes varies by SoC. */
/* Greedy allocation: for each bank, pick the first (largest, assuming
 * sdram_bank_sizes[] is sorted descending — TODO confirm with callers)
 * supported size that still fits in the remaining budget.  Any budget
 * that cannot be placed is dropped with a truncation warning; returns
 * the amount actually allocated.  (The function continues past this
 * excerpt.) */
853 ram_addr_t ppc4xx_sdram_adjust(ram_addr_t ram_size, int nr_banks,
854 target_phys_addr_t ram_bases[],
855 target_phys_addr_t ram_sizes[],
856 const unsigned int sdram_bank_sizes[])
858 ram_addr_t size_left = ram_size;
862 for (i = 0; i < nr_banks; i++) {
863 for (j = 0; sdram_bank_sizes[j] != 0; j++) {
864 unsigned int bank_size = sdram_bank_sizes[j];
866 if (bank_size <= size_left) {
/* NOTE(review): ram_bases[i] receives a RAM-allocator offset from
 * qemu_ram_alloc(), not a guest physical base — consistent with the
 * type-mixing "XXX: TOFIX" note earlier in this file. */
867 ram_bases[i] = qemu_ram_alloc(bank_size);
868 ram_sizes[i] = bank_size;
869 size_left -= bank_size;
875 /* No need to use the remaining banks. */
880 ram_size -= size_left;
882 printf("Truncating memory to %d MiB to fit SDRAM controller limits.\n",
883 (int)(ram_size >> 20));