#include <log.h>
#include <malloc.h>
+#include <asm/global_data.h>
+
+DECLARE_GLOBAL_DATA_PTR;
+
#define LMB_ALLOC_ANYWHERE 0
-void lmb_dump_all_force(struct lmb *lmb)
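+/* print the base, end, size and flags of every entry in a region list */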
+static void lmb_dump_region(struct lmb_region *rgn, char *name)
{
- unsigned long i;
+ unsigned long long base, size, end;
+ enum lmb_flags flags;
+ int i;
- printf("lmb_dump_all:\n");
- printf(" memory.cnt = 0x%lx\n", lmb->memory.cnt);
- for (i = 0; i < lmb->memory.cnt; i++) {
- printf(" memory.reg[0x%lx].base = 0x%llx\n", i,
- (unsigned long long)lmb->memory.region[i].base);
- printf(" .size = 0x%llx\n",
- (unsigned long long)lmb->memory.region[i].size);
- }
+ printf(" %s.cnt = 0x%lx\n", name, rgn->cnt);
- printf("\n reserved.cnt = 0x%lx\n", lmb->reserved.cnt);
- for (i = 0; i < lmb->reserved.cnt; i++) {
- printf(" reserved.reg[0x%lx].base = 0x%llx\n", i,
- (unsigned long long)lmb->reserved.region[i].base);
- printf(" .size = 0x%llx\n",
- (unsigned long long)lmb->reserved.region[i].size);
+ for (i = 0; i < rgn->cnt; i++) {
+ base = rgn->region[i].base;
+ size = rgn->region[i].size;
+ end = base + size - 1;
+ flags = rgn->region[i].flags;
+
+ printf(" %s[%d]\t[0x%llx-0x%llx], 0x%08llx bytes flags: %x\n",
+ name, i, base, end, size, flags);
}
}
+void lmb_dump_all_force(struct lmb *lmb)
+{
+ printf("lmb_dump_all:\n");
+ lmb_dump_region(&lmb->memory, "memory");
+ lmb_dump_region(&lmb->reserved, "reserved");
+}
+
void lmb_dump_all(struct lmb *lmb)
{
#ifdef DEBUG
for (i = r; i < rgn->cnt - 1; i++) {
rgn->region[i].base = rgn->region[i + 1].base;
rgn->region[i].size = rgn->region[i + 1].size;
+ rgn->region[i].flags = rgn->region[i + 1].flags;
}
rgn->cnt--;
}
lmb->reserved.cnt = 0;
}
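+/**
+ * arch_lmb_reserve_generic() - reserve the memory used by the running U-Boot
+ * @lmb:   lmb structure to add the reservation to
+ * @sp:    current stack pointer
+ * @end:   end of the area to protect (end of the U-Boot region)
+ * @align: safety margin subtracted from @sp before reserving
+ *
+ * Generic helper intended to be called from per-arch arch_lmb_reserve()
+ * implementations.
+ */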
+void arch_lmb_reserve_generic(struct lmb *lmb, ulong sp, ulong end, ulong align)
+{
+ ulong bank_end;
+ int bank;
+
+ /*
+ * Reserve memory from aligned address below the bottom of U-Boot stack
+ * until end of U-Boot area using LMB to prevent U-Boot from overwriting
+ * that memory.
+ */
+ debug("## Current stack ends at 0x%08lx ", sp);
+
+ /* adjust sp down by 'align' bytes to be safe */
+ sp -= align;
+ for (bank = 0; bank < CONFIG_NR_DRAM_BANKS; bank++) {
+ if (!gd->bd->bi_dram[bank].size ||
+ sp < gd->bd->bi_dram[bank].start)
+ continue;
+ /* Watch out for RAM at end of address space! */
+ bank_end = gd->bd->bi_dram[bank].start +
+ gd->bd->bi_dram[bank].size - 1;
+ if (sp > bank_end)
+ continue;
+ if (bank_end > end)
+ bank_end = end - 1;
+
+ lmb_reserve(lmb, sp, bank_end - sp + 1);
+ break;
+ }
+}
+
static void lmb_reserve_common(struct lmb *lmb, void *fdt_blob)
{
arch_lmb_reserve(lmb);
board_lmb_reserve(lmb);
- if (IMAGE_ENABLE_OF_LIBFDT && fdt_blob)
+ if (CONFIG_IS_ENABLED(OF_LIBFDT) && fdt_blob)
boot_fdt_add_mem_rsv_regions(lmb, fdt_blob);
}
/* This routine is called with relocation disabled. */
-static long lmb_add_region(struct lmb_region *rgn, phys_addr_t base, phys_size_t size)
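+/*
+ * Add a region to the list. Adjacent regions are coalesced only when their
+ * flags match; re-adding an existing range with different flags fails.
+ */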
+static long lmb_add_region_flags(struct lmb_region *rgn, phys_addr_t base,
+ phys_size_t size, enum lmb_flags flags)
{
unsigned long coalesced = 0;
long adjacent, i;
if (rgn->cnt == 0) {
rgn->region[0].base = base;
rgn->region[0].size = size;
+ rgn->region[0].flags = flags;
rgn->cnt = 1;
return 0;
}
for (i = 0; i < rgn->cnt; i++) {
phys_addr_t rgnbase = rgn->region[i].base;
phys_size_t rgnsize = rgn->region[i].size;
+ enum lmb_flags rgnflags = rgn->region[i].flags;
- if ((rgnbase == base) && (rgnsize == size))
- /* Already have this region, so we're done */
- return 0;
+ if (rgnbase == base && rgnsize == size) {
+ if (flags == rgnflags)
+ /* Already have this region, so we're done */
+ return 0;
+ else
+ return -1; /* same region, different flags: reject */
+ }
adjacent = lmb_addrs_adjacent(base, size, rgnbase, rgnsize);
if (adjacent > 0) {
+ if (flags != rgnflags)
+ break;
rgn->region[i].base -= size;
rgn->region[i].size += size;
coalesced++;
break;
} else if (adjacent < 0) {
+ if (flags != rgnflags)
+ break;
rgn->region[i].size += size;
coalesced++;
break;
}
if ((i < rgn->cnt - 1) && lmb_regions_adjacent(rgn, i, i + 1)) {
- lmb_coalesce_regions(rgn, i, i + 1);
- coalesced++;
+ if (rgn->region[i].flags == rgn->region[i + 1].flags) {
+ lmb_coalesce_regions(rgn, i, i + 1);
+ coalesced++;
+ }
}
if (coalesced)
if (base < rgn->region[i].base) {
rgn->region[i + 1].base = rgn->region[i].base;
rgn->region[i + 1].size = rgn->region[i].size;
+ rgn->region[i + 1].flags = rgn->region[i].flags;
} else {
rgn->region[i + 1].base = base;
rgn->region[i + 1].size = size;
+ rgn->region[i + 1].flags = flags;
break;
}
}
if (base < rgn->region[0].base) {
rgn->region[0].base = base;
rgn->region[0].size = size;
+ rgn->region[0].flags = flags;
}
rgn->cnt++;
return 0;
}
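+/* compatibility wrapper: add a region with the default LMB_NONE flags */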
+static long lmb_add_region(struct lmb_region *rgn, phys_addr_t base,
+ phys_size_t size)
+{
+ return lmb_add_region_flags(rgn, base, size, LMB_NONE);
+}
+
/* This routine may be called with relocation disabled. */
long lmb_add(struct lmb *lmb, phys_addr_t base, phys_size_t size)
{
* beginning of the hole and add the region after the hole.
*/
rgn->region[i].size = base - rgn->region[i].base;
- return lmb_add_region(rgn, end + 1, rgnend - end);
+ return lmb_add_region_flags(rgn, end + 1, rgnend - end,
+ rgn->region[i].flags);
}
-long lmb_reserve(struct lmb *lmb, phys_addr_t base, phys_size_t size)
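+/**
+ * lmb_reserve_flags() - reserve one region with a specific flags bitfield
+ * @lmb:   the logical memory block struct
+ * @base:  base address of the memory region
+ * @size:  size of the memory region
+ * @flags: flags for the memory region
+ *
+ * Return: 0 if OK, > 0 for coalesced region or a negative error code.
+ */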
+long lmb_reserve_flags(struct lmb *lmb, phys_addr_t base, phys_size_t size,
+ enum lmb_flags flags)
{
struct lmb_region *_rgn = &(lmb->reserved);
- return lmb_add_region(_rgn, base, size);
+ return lmb_add_region_flags(_rgn, base, size, flags);
+}
+
+long lmb_reserve(struct lmb *lmb, phys_addr_t base, phys_size_t size)
+{
+ return lmb_reserve_flags(lmb, base, size, LMB_NONE);
}
static long lmb_overlaps_region(struct lmb_region *rgn, phys_addr_t base,
return 0;
}
-int lmb_is_reserved(struct lmb *lmb, phys_addr_t addr)
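+/* check if an address is within a reserved region that has all @flags set */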
+int lmb_is_reserved_flags(struct lmb *lmb, phys_addr_t addr, int flags)
{
int i;
phys_addr_t upper = lmb->reserved.region[i].base +
lmb->reserved.region[i].size - 1;
if ((addr >= lmb->reserved.region[i].base) && (addr <= upper))
- return 1;
+ return (lmb->reserved.region[i].flags & flags) == flags;
}
return 0;
}
+int lmb_is_reserved(struct lmb *lmb, phys_addr_t addr)
+{
+ return lmb_is_reserved_flags(lmb, addr, LMB_NONE);
+}
+
__weak void board_lmb_reserve(struct lmb *lmb)
{
/* please define platform specific board_lmb_reserve() */