Merge branch 'master' of git://git.kernel.org/pub/scm/fs/xfs/xfs
[pandora-kernel.git] / arch/powerpc/mm/fault.c
index 8660986..7699394 100644
@@ -30,6 +30,7 @@
 #include <linux/kprobes.h>
 #include <linux/kdebug.h>
 
+#include <asm/firmware.h>
 #include <asm/page.h>
 #include <asm/pgtable.h>
 #include <asm/mmu.h>
@@ -252,45 +253,33 @@ good_area:
 #endif /* CONFIG_8xx */
 
        if (is_exec) {
-#if !(defined(CONFIG_4xx) || defined(CONFIG_BOOKE))
-               /* protection fault */
+#ifdef CONFIG_PPC_STD_MMU
+               /* Protection faults on exec go straight to failure on
+                * hash-based MMUs, as they either don't support per-page
+                * execute permission or, if they do, it's already handled
+                * at the hash level. This test would probably have to
+                * be removed if we change the way this works so that hash
+                * processors use the same I/D cache coherency mechanism
+                * as embedded ones.
+                */
                if (error_code & DSISR_PROTFAULT)
                        goto bad_area;
+#endif /* CONFIG_PPC_STD_MMU */
+
                /*
                 * Allow execution from readable areas if the MMU does not
                 * provide separate controls over reading and executing.
+                *
+                * Note: This code used not to be enabled for 4xx/BookE.
+                * It is now, since I/D cache coherency for these is done
+                * at set_pte_at() time, and I see no reason why the test
+                * below wouldn't be valid on those processors. This -may-
+                * break programs compiled with a really old ABI, though.
                 */
                if (!(vma->vm_flags & VM_EXEC) &&
                    (cpu_has_feature(CPU_FTR_NOEXECUTE) ||
                     !(vma->vm_flags & (VM_READ | VM_WRITE))))
                        goto bad_area;
-#else
-               pte_t *ptep;
-               pmd_t *pmdp;
-
-               /* Since 4xx/Book-E supports per-page execute permission,
-                * we lazily flush dcache to icache. */
-               ptep = NULL;
-               if (get_pteptr(mm, address, &ptep, &pmdp)) {
-                       spinlock_t *ptl = pte_lockptr(mm, pmdp);
-                       spin_lock(ptl);
-                       if (pte_present(*ptep)) {
-                               struct page *page = pte_page(*ptep);
-
-                               if (!test_bit(PG_arch_1, &page->flags)) {
-                                       flush_dcache_icache_page(page);
-                                       set_bit(PG_arch_1, &page->flags);
-                               }
-                               pte_update(ptep, 0, _PAGE_HWEXEC |
-                                          _PAGE_ACCESSED);
-                               _tlbie(address, mm->context.id);
-                               pte_unmap_unlock(ptep, ptl);
-                               up_read(&mm->mmap_sem);
-                               return 0;
-                       }
-                       pte_unmap_unlock(ptep, ptl);
-               }
-#endif
        /* a write */
        } else if (is_write) {
                if (!(vma->vm_flags & VM_WRITE))
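For reference, the exec-fault policy that results from the hunk above can be written as a standalone predicate. This is only a sketch of the resulting logic, not code from the patch: the function name and the has_noexec parameter (standing in for cpu_has_feature(CPU_FTR_NOEXECUTE)) are made up for illustration, and the VM_* bits are the usual vm_flags from <linux/mm.h>.

    /* Sketch only: would an instruction fault in this vma be allowed?
     * Mirrors the check above: an executable mapping always passes;
     * otherwise execution is tolerated only when the MMU has no
     * separate no-execute control and the area is at least readable
     * or writable. */
    static int exec_fault_allowed(unsigned long vm_flags, int has_noexec)
    {
            if (vm_flags & VM_EXEC)
                    return 1;       /* mapping is marked executable */
            if (has_noexec)
                    return 0;       /* MMU enforces execute permission */
            return (vm_flags & (VM_READ | VM_WRITE)) != 0;
    }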
@@ -318,9 +307,16 @@ good_area:
                        goto do_sigbus;
                BUG();
        }
-       if (ret & VM_FAULT_MAJOR)
+       if (ret & VM_FAULT_MAJOR) {
                current->maj_flt++;
-       else
+#ifdef CONFIG_PPC_SMLPAR
+               if (firmware_has_feature(FW_FEATURE_CMO)) {
+                       preempt_disable();
+                       get_lppaca()->page_ins += (1 << PAGE_FACTOR);
+                       preempt_enable();
+               }
+#endif
+       } else
                current->min_flt++;
        up_read(&mm->mmap_sem);
        return 0;
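The second hunk charges major faults to the hypervisor when the Cooperative Memory Overcommit (CMO) firmware feature is active: get_lppaca() returns a per-CPU structure, hence the preempt_disable()/preempt_enable() pair, and page_ins appears to be kept in units of 4K hardware pages, 1 << PAGE_FACTOR of them per base page. Below is a minimal user-space sketch of that scaling, assuming PAGE_FACTOR equals PAGE_SHIFT minus the 4K hardware page shift (12), as on 64-bit powerpc; the values printed are illustrative only.

    #include <stdio.h>

    /* Illustration only: number of 4K hardware pages reported per major
     * fault for two possible base page sizes, assuming
     * PAGE_FACTOR == PAGE_SHIFT - 12. */
    int main(void)
    {
            unsigned int page_shifts[] = { 12, 16 };        /* 4K, 64K */
            for (int i = 0; i < 2; i++) {
                    unsigned int page_factor = page_shifts[i] - 12;
                    printf("PAGE_SHIFT=%u -> page_ins += %u\n",
                           page_shifts[i], 1u << page_factor);
            }
            return 0;
    }

With 4K base pages this adds 1 per major fault; with 64K base pages it adds 16, keeping the hypervisor's page-in count in a page-size-independent unit.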