diff -r 62ecb3b04f20 arch/arm/kernel/process.c
--- a/arch/arm/kernel/process.c	Wed Jun 10 04:00:49 2009 +0000
+++ b/arch/arm/kernel/process.c	Fri Aug 14 13:44:56 2009 -0500
@@ -100,7 +100,7 @@
 	/*
 	 * Now call the architecture specific reboot code.
 	 */
-	arch_reset(mode, cmd);
+	arch_reset(mode);
 
 	/*
 	 * Whoops - the architecture was unable to reboot.
diff -r 62ecb3b04f20 arch/arm/mm/Kconfig
--- a/arch/arm/mm/Kconfig	Wed Jun 10 04:00:49 2009 +0000
+++ b/arch/arm/mm/Kconfig	Fri Aug 14 13:44:56 2009 -0500
@@ -391,7 +391,7 @@
 
 # ARMv6
 config CPU_V6
-	bool "Support ARM V6 processor" if ARCH_INTEGRATOR || MACH_REALVIEW_EB
+	bool "Support ARM V6 processor" if ARCH_INTEGRATOR || MACH_REALVIEW_EB || ARCH_COMCERTO
 	select CPU_32v6
 	select CPU_ABRT_EV6
 	select CPU_PABRT_NOIFAR
diff -r 62ecb3b04f20 arch/arm/mm/cache-v6.S
--- a/arch/arm/mm/cache-v6.S	Wed Jun 10 04:00:49 2009 +0000
+++ b/arch/arm/mm/cache-v6.S	Fri Aug 14 13:44:56 2009 -0500
@@ -193,6 +193,10 @@
 #else
 	mcrne	p15, 0, r1, c7, c15, 1		@ clean & invalidate unified line
 #endif
+#ifdef CONFIG_ARCH_COMCERTO
+	sub	r1, r1, #1
+	mcrr		p15, 0, r1, r0, c6
+#else
 1:
 #ifdef HARVARD_CACHE
 	mcr	p15, 0, r0, c7, c6, 1		@ invalidate D line
@@ -202,6 +206,7 @@
 	add	r0, r0, #D_CACHE_LINE_SIZE
 	cmp	r0, r1
 	blo	1b
+#endif
 	mov	r0, #0
 	mcr	p15, 0, r0, c7, c10, 4		@ drain write buffer
 	mov	pc, lr
@@ -212,6 +217,10 @@
 *	- end     - virtual end address of region
 */
 ENTRY(v6_dma_clean_range)
+#ifdef CONFIG_ARCH_COMCERTO
+	sub	r1, r1, #1
+	mcrr		p15, 0, r1, r0, c12
+#else
 	bic	r0, r0, #D_CACHE_LINE_SIZE - 1
 1:
 #ifdef HARVARD_CACHE
@@ -222,6 +232,7 @@
 	add	r0, r0, #D_CACHE_LINE_SIZE
 	cmp	r0, r1
 	blo	1b
+#endif
 	mov	r0, #0
 	mcr	p15, 0, r0, c7, c10, 4		@ drain write buffer
 	mov	pc, lr
@@ -232,6 +243,10 @@
  *	- end     - virtual end address of region
  */
 ENTRY(v6_dma_flush_range)
+#ifdef CONFIG_ARCH_COMCERTO
+	sub	r1, r1, #1
+	mcrr		p15, 0, r1, r0, c14
+#else
 	bic	r0, r0, #D_CACHE_LINE_SIZE - 1
 1:
 #ifdef HARVARD_CACHE
@@ -242,6 +257,7 @@
 	add	r0, r0, #D_CACHE_LINE_SIZE
 	cmp	r0, r1
 	blo	1b
+#endif
 	mov	r0, #0
 	mcr	p15, 0, r0, c7, c10, 4		@ drain write buffer
 	mov	pc, lr
diff -r 62ecb3b04f20 arch/arm/mm/init.c
--- a/arch/arm/mm/init.c	Wed Jun 10 04:00:49 2009 +0000
+++ b/arch/arm/mm/init.c	Fri Aug 14 13:44:56 2009 -0500
@@ -235,7 +235,36 @@
 		if (end_pfn < end)
 			end_pfn = end;
 
+/* We handle the DMA zone as a special case to make it
+   Inner Cacheable and Outer non-Cacheable.
+ */
+#if defined(CONFIG_ARCH_COMCERTO) && defined(CONFIG_CACHE_L2X0)
+		if (node == 0 && i == 0) {
+			struct map_desc map;
+
+			WARN_ON(bank->start != PHYS_OFFSET);
+			map.pfn = __phys_to_pfn(bank->start);
+			map.virtual = __phys_to_virt(bank->start);
+			map.length = (__pa(swapper_pg_dir) & SECTION_MASK) - PHYS_OFFSET;
+			map.type = MT_DMA_MEMORY;
+			create_mapping(&map);
+
+			bank->start += map.length;
+			bank->size -= map.length;
+		}
+#endif
 		map_memory_bank(bank);
+
+/* We want to change the MMU settings for the DMA ZONE, but not the actual
+   memory bank descriptions, so we restore the initial settings once the
+   specific mapping is done.
+ */
+#if defined(CONFIG_ARCH_COMCERTO) && defined(CONFIG_CACHE_L2X0)
+		if (node == 0 && i == 0) {
+			bank->start -= (__pa(swapper_pg_dir) & SECTION_MASK) - PHYS_OFFSET;
+			bank->size += (__pa(swapper_pg_dir) & SECTION_MASK) - PHYS_OFFSET;
+		}
+#endif
 	}
 
 	/*
@@ -505,6 +534,10 @@
 				    __phys_to_pfn(__pa(swapper_pg_dir)), NULL);
 #endif
 
+#if defined(CONFIG_ARCH_COMCERTO) && defined(CONFIG_CACHE_L2X0)
+	/* now that our DMA memory is actually so designated, we can free it */
+	free_area(PAGE_OFFSET, (unsigned long)swapper_pg_dir & SECTION_MASK, NULL);
+#endif
 #ifdef CONFIG_HIGHMEM
 	/* set highmem page free */
 	for_each_online_node(node) {
diff -r 62ecb3b04f20 arch/arm/mm/mmu.c
--- a/arch/arm/mm/mmu.c	Wed Jun 10 04:00:49 2009 +0000
+++ b/arch/arm/mm/mmu.c	Fri Aug 14 13:44:56 2009 -0500
@@ -249,6 +249,12 @@
 		.prot_sect = PMD_TYPE_SECT | PMD_SECT_AP_WRITE,
 		.domain    = DOMAIN_KERNEL,
 	},
+#if defined(CONFIG_ARCH_COMCERTO) && defined(CONFIG_CACHE_L2X0)
+	[MT_DMA_MEMORY] = {
+		.prot_sect = PMD_TYPE_SECT | PMD_SECT_AP_WRITE | PMD_SECT_WB | PMD_SECT_TEX(4),
+		.domain	   = DOMAIN_KERNEL,
+	},
+#endif
 };
 
 const struct mem_type *get_mem_type(unsigned int type)
@@ -493,7 +499,12 @@
 	} while (pte++, addr += PAGE_SIZE, addr != end);
 }
 
-static void __init alloc_init_section(pgd_t *pgd, unsigned long addr,
+#if defined(CONFIG_ARCH_COMCERTO)
+void
+#else
+static void
+#endif
+__init alloc_init_section(pgd_t *pgd, unsigned long addr,
 				      unsigned long end, unsigned long phys,
 				      const struct mem_type *type)
 {
@@ -846,6 +857,10 @@
 	 */
 	res_size = __pa(swapper_pg_dir) - PHYS_OFFSET;
 #endif
+#if defined(CONFIG_ARCH_COMCERTO) && defined(CONFIG_CACHE_L2X0)
+	/* Reserve our DMA zone so no one else takes a chunk of it. */
+	res_size = (__pa(swapper_pg_dir) & SECTION_MASK) - PHYS_OFFSET;
+#endif
 	if (res_size)
 		reserve_bootmem_node(pgdat, PHYS_OFFSET, res_size,
 				BOOTMEM_DEFAULT);
