[PATCH] x86: Improve handling of kernel mappings in change_page_attr
Author: Jan Beulich <jbeulich@novell.com>
Date: Wed, 2 May 2007 17:27:10 +0000 (19:27 +0200)
Committer: Andi Kleen <andi@basil.nowhere.org>
Date: Wed, 2 May 2007 17:27:10 +0000 (19:27 +0200)
Fix various broken corner cases in i386 and x86-64 change_page_attr.

AK: split off from tighten kernel image access rights

Signed-off-by: Jan Beulich <jbeulich@novell.com>
Signed-off-by: Andi Kleen <ak@suse.de>
arch/i386/mm/pageattr.c
arch/x86_64/mm/pageattr.c
include/asm-i386/pgtable.h

index 412ebbd..ea6b6d4 100644 (file)
@@ -142,7 +142,7 @@ __change_page_attr(struct page *page, pgprot_t prot)
                return -EINVAL;
        kpte_page = virt_to_page(kpte);
        if (pgprot_val(prot) != pgprot_val(PAGE_KERNEL)) { 
-               if ((pte_val(*kpte) & _PAGE_PSE) == 0) { 
+               if (!pte_huge(*kpte)) {
                        set_pte_atomic(kpte, mk_pte(page, prot)); 
                } else {
                        pgprot_t ref_prot;
@@ -158,7 +158,7 @@ __change_page_attr(struct page *page, pgprot_t prot)
                        kpte_page = split;
                }
                page_private(kpte_page)++;
-       } else if ((pte_val(*kpte) & _PAGE_PSE) == 0) { 
+       } else if (!pte_huge(*kpte)) {
                set_pte_atomic(kpte, mk_pte(page, PAGE_KERNEL));
                BUG_ON(page_private(kpte_page) == 0);
                page_private(kpte_page)--;
index 76ee90a..bf4aa8d 100644 (file)
@@ -179,16 +179,24 @@ __change_page_attr(unsigned long address, unsigned long pfn, pgprot_t prot,
 int change_page_attr_addr(unsigned long address, int numpages, pgprot_t prot)
 {
        unsigned long phys_base_pfn = __pa_symbol(__START_KERNEL_map) >> PAGE_SHIFT;
-       int err = 0;
+       int err = 0, kernel_map = 0;
        int i; 
 
+       if (address >= __START_KERNEL_map
+           && address < __START_KERNEL_map + KERNEL_TEXT_SIZE) {
+               address = (unsigned long)__va(__pa(address));
+               kernel_map = 1;
+       }
+
        down_write(&init_mm.mmap_sem);
        for (i = 0; i < numpages; i++, address += PAGE_SIZE) {
                unsigned long pfn = __pa(address) >> PAGE_SHIFT;
 
-               err = __change_page_attr(address, pfn, prot, PAGE_KERNEL);
-               if (err) 
-                       break; 
+               if (!kernel_map || pte_present(pfn_pte(0, prot))) {
+                       err = __change_page_attr(address, pfn, prot, PAGE_KERNEL);
+                       if (err)
+                               break;
+               }
                /* Handle kernel mapping too which aliases part of the
                 * lowmem */
                if ((pfn >= phys_base_pfn) &&
index c3b58d4..143ddc4 100644 (file)
@@ -159,6 +159,7 @@ void paging_init(void);
 
 extern unsigned long long __PAGE_KERNEL, __PAGE_KERNEL_EXEC;
 #define __PAGE_KERNEL_RO               (__PAGE_KERNEL & ~_PAGE_RW)
+#define __PAGE_KERNEL_RX               (__PAGE_KERNEL_EXEC & ~_PAGE_RW)
 #define __PAGE_KERNEL_NOCACHE          (__PAGE_KERNEL | _PAGE_PCD)
 #define __PAGE_KERNEL_LARGE            (__PAGE_KERNEL | _PAGE_PSE)
 #define __PAGE_KERNEL_LARGE_EXEC       (__PAGE_KERNEL_EXEC | _PAGE_PSE)
@@ -166,6 +167,7 @@ extern unsigned long long __PAGE_KERNEL, __PAGE_KERNEL_EXEC;
 #define PAGE_KERNEL            __pgprot(__PAGE_KERNEL)
 #define PAGE_KERNEL_RO         __pgprot(__PAGE_KERNEL_RO)
 #define PAGE_KERNEL_EXEC       __pgprot(__PAGE_KERNEL_EXEC)
+#define PAGE_KERNEL_RX         __pgprot(__PAGE_KERNEL_RX)
 #define PAGE_KERNEL_NOCACHE    __pgprot(__PAGE_KERNEL_NOCACHE)
 #define PAGE_KERNEL_LARGE      __pgprot(__PAGE_KERNEL_LARGE)
 #define PAGE_KERNEL_LARGE_EXEC __pgprot(__PAGE_KERNEL_LARGE_EXEC)