arch/tile: fix bug in loading kernels larger than 16 MB
author Chris Metcalf <cmetcalf@tilera.com>
Thu, 29 Mar 2012 19:42:27 +0000 (15:42 -0400)
committer Chris Metcalf <cmetcalf@tilera.com>
Mon, 2 Apr 2012 16:13:12 +0000 (12:13 -0400)
Previously we only handled kernels up to a single 16 MB huge page in size.
Now we walk as many PMD slots as the image needs, installing additional
L2 page tables (or additional huge-page entries) to cover all of the
kernel text.
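
To illustrate the new flow, here is a minimal user-space sketch (not the
kernel code itself) of the per-page loop.  It assumes a 64 KB page size
purely for illustration, uses the 16 MB huge-page size from the subject,
and the 40 MB kernel_len plus calloc()/free() are hypothetical stand-ins
for the real image size and alloc_pte()/assign_pte():

    /*
     * Illustration only: each time the loop crosses a 16 MB boundary it
     * installs the finished L2 page table and allocates a fresh one,
     * which is what the patch does with assign_pte(pmd++, pte).
     */
    #include <stdio.h>
    #include <stdlib.h>

    #define PAGE_SIZE    (64UL * 1024)              /* assumed for illustration */
    #define HPAGE_SIZE   (16UL * 1024 * 1024)       /* one huge page, per the subject */
    #define PTRS_PER_PTE (HPAGE_SIZE / PAGE_SIZE)   /* small pages per huge page */

    int main(void)
    {
            unsigned long kernel_len = 40UL * 1024 * 1024;  /* hypothetical >16 MB image */
            unsigned long address, pfn = 0, pte_ofs;
            unsigned long *pte = NULL;
            int pmds = 0;

            for (address = 0; address < kernel_len; pfn++, address += PAGE_SIZE) {
                    pte_ofs = (address / PAGE_SIZE) % PTRS_PER_PTE;
                    if (pte_ofs == 0) {
                            if (pte) {              /* stands in for assign_pte(pmd++, pte) */
                                    free(pte);
                                    pmds++;
                            }
                            pte = calloc(PTRS_PER_PTE, sizeof(*pte));
                    }
                    pte[pte_ofs] = pfn;             /* stands in for pfn_pte(pfn, prot) */
            }
            if (pte) {                              /* final assign_pte(pmd, pte) */
                    free(pte);
                    pmds++;
            }
            printf("mapped %lu MB of text via %d PMD entries\n",
                   kernel_len >> 20, pmds);
            return 0;
    }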

Signed-off-by: Chris Metcalf <cmetcalf@tilera.com>
diff --git a/arch/tile/mm/init.c b/arch/tile/mm/init.c
index 830c490..8400d3f 100644
--- a/arch/tile/mm/init.c
+++ b/arch/tile/mm/init.c
@@ -557,6 +557,7 @@ static void __init kernel_physical_mapping_init(pgd_t *pgd_base)
 
        address = MEM_SV_INTRPT;
        pmd = get_pmd(pgtables, address);
+       pfn = 0;  /* code starts at PA 0 */
        if (ktext_small) {
                /* Allocate an L2 PTE for the kernel text */
                int cpu = 0;
@@ -579,10 +580,15 @@ static void __init kernel_physical_mapping_init(pgd_t *pgd_base)
                }
 
                BUG_ON(address != (unsigned long)_stext);
-               pfn = 0;  /* code starts at PA 0 */
-               pte = alloc_pte();
-               for (pte_ofs = 0; address < (unsigned long)_einittext;
-                    pfn++, pte_ofs++, address += PAGE_SIZE) {
+               pte = NULL;
+               for (; address < (unsigned long)_einittext;
+                    pfn++, address += PAGE_SIZE) {
+                       pte_ofs = pte_index(address);
+                       if (pte_ofs == 0) {
+                               if (pte)
+                                       assign_pte(pmd++, pte);
+                               pte = alloc_pte();
+                       }
                        if (!ktext_local) {
                                prot = set_remote_cache_cpu(prot, cpu);
                                cpu = cpumask_next(cpu, &ktext_mask);
@@ -591,7 +597,8 @@ static void __init kernel_physical_mapping_init(pgd_t *pgd_base)
                        }
                        pte[pte_ofs] = pfn_pte(pfn, prot);
                }
-               assign_pte(pmd, pte);
+               if (pte)
+                       assign_pte(pmd, pte);
        } else {
                pte_t pteval = pfn_pte(0, PAGE_KERNEL_EXEC);
                pteval = pte_mkhuge(pteval);
@@ -614,7 +621,9 @@ static void __init kernel_physical_mapping_init(pgd_t *pgd_base)
                else
                        pteval = hv_pte_set_mode(pteval,
                                                 HV_PTE_MODE_CACHE_NO_L3);
-               *(pte_t *)pmd = pteval;
+               for (; address < (unsigned long)_einittext;
+                    pfn += PFN_DOWN(HPAGE_SIZE), address += HPAGE_SIZE)
+                       *(pte_t *)(pmd++) = pfn_pte(pfn, pteval);
        }
 
        /* Set swapper_pgprot here so it is flushed to memory right away. */