select HAVE_C_RECORDMCOUNT
select HAVE_GENERIC_HARDIRQS
select HAVE_SPARSE_IRQ
+ select HAVE_IOREMAP_PROT
select GENERIC_IRQ_SHOW
select CPU_PM if (SUSPEND || CPU_IDLE)
help
extern void __iomem *__arm_ioremap_exec(unsigned long, size_t, bool cached);
extern void __iounmap(volatile void __iomem *addr);
+/* Map a physical range with caller-supplied L_PTE_MT_* memory-type bits
+ * (prot_val); attributes other than the memory type come from MT_DEVICE. */
+extern void __iomem *ioremap_prot(resource_size_t, unsigned long size,
+	unsigned long prot_val);
+
/*
* Bad read/write accesses...
*/
__builtin_return_address(0));
}
+/*
+ * ioremap_prot - map bus memory into CPU space with explicit memory type
+ * @phys_addr: bus/physical address to map (need not be page-aligned)
+ * @size: size of the region in bytes
+ * @prot_val: L_PTE_MT_* memory-type bits to apply to the mapping
+ *
+ * The mapping inherits all MT_DEVICE pte attributes except the memory
+ * type, which is replaced by @prot_val.  Returns the virtual address of
+ * the mapping, or NULL on failure.  Undo with iounmap().
+ */
+void __iomem *ioremap_prot(resource_size_t phys_addr, unsigned long size,
+			   unsigned long prot_val)
+{
+	const struct mem_type *type;
+	unsigned long offset, addr;
+	struct vm_struct *area;
+	pteval_t prot_pte;
+	int err;
+
+	/*
+	 * Page-align the request, remembering the sub-page offset so
+	 * callers may pass an unaligned physical address.
+	 */
+	offset = phys_addr & ~PAGE_MASK;
+	phys_addr &= PAGE_MASK;
+	size = PAGE_ALIGN(offset + size);
+
+	type = get_mem_type(MT_DEVICE);
+	if (!type)
+		return NULL;
+
+	/* Keep the device attributes; substitute only the memory type. */
+	prot_pte = type->prot_pte & ~L_PTE_MT_MASK;
+	prot_pte |= prot_val & L_PTE_MT_MASK;
+
+	area = get_vm_area_caller(size, VM_IOREMAP,
+				  __builtin_return_address(0));
+	if (!area)
+		return NULL;
+	addr = (unsigned long)area->addr;
+
+	err = ioremap_page_range(addr, addr + size, phys_addr,
+				 __pgprot(prot_pte));
+	if (err) {
+		/* Tear down the partially-built mapping and the vm area. */
+		vunmap((void *)addr);
+		return NULL;
+	}
+
+	flush_cache_vmap(addr, addr + size);
+	/* Point the caller at the requested address, not the page base. */
+	return (void __iomem *)(addr + offset);
+}
+EXPORT_SYMBOL(ioremap_prot);
+
void __iounmap(volatile void __iomem *io_addr)
{
void *addr = (void *)(PAGE_MASK & (unsigned long)io_addr);