[PATCH] revised Memory Add Fixes for ppc64
[pandora-kernel.git] arch/powerpc/mm/hash_utils_64.c
/*
 * PowerPC64 port by Mike Corrigan and Dave Engebretsen
 *   {mikejc|engebret}@us.ibm.com
 *
 *    Copyright (c) 2000 Mike Corrigan <mikejc@us.ibm.com>
 *
 * SMP scalability work:
 *    Copyright (C) 2001 Anton Blanchard <anton@au.ibm.com>, IBM
 *
 *    Module name: htab.c
 *
 *    Description:
 *      PowerPC Hashed Page Table functions
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#undef DEBUG
#undef DEBUG_LOW

#include <linux/config.h>
#include <linux/spinlock.h>
#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/proc_fs.h>
#include <linux/stat.h>
#include <linux/sysctl.h>
#include <linux/ctype.h>
#include <linux/cache.h>
#include <linux/init.h>
#include <linux/signal.h>

#include <asm/processor.h>
#include <asm/pgtable.h>
#include <asm/mmu.h>
#include <asm/mmu_context.h>
#include <asm/page.h>
#include <asm/types.h>
#include <asm/system.h>
#include <asm/uaccess.h>
#include <asm/machdep.h>
#include <asm/lmb.h>
#include <asm/abs_addr.h>
#include <asm/tlbflush.h>
#include <asm/io.h>
#include <asm/eeh.h>
#include <asm/tlb.h>
#include <asm/cacheflush.h>
#include <asm/cputable.h>
#include <asm/sections.h>

#ifdef DEBUG
#define DBG(fmt...) udbg_printf(fmt)
#else
#define DBG(fmt...)
#endif

#ifdef DEBUG_LOW
#define DBG_LOW(fmt...) udbg_printf(fmt)
#else
#define DBG_LOW(fmt...)
#endif

#define KB (1024)
#define MB (1024*KB)

/*
 * Note:  pte   --> Linux PTE
 *        HPTE  --> PowerPC Hashed Page Table Entry
 *
 * Execution context:
 *   htab_initialize is called with the MMU off (of course), but
 *   the kernel has been copied down to zero so it can directly
 *   reference global data.  At this point it is very difficult
 *   to print debug info.
 *
 */

#ifdef CONFIG_U3_DART
extern unsigned long dart_tablebase;
#endif /* CONFIG_U3_DART */

hpte_t *htab_address;
unsigned long htab_hash_mask;
unsigned long _SDR1;
struct mmu_psize_def mmu_psize_defs[MMU_PAGE_COUNT];
int mmu_linear_psize = MMU_PAGE_4K;
int mmu_virtual_psize = MMU_PAGE_4K;
#ifdef CONFIG_HUGETLB_PAGE
int mmu_huge_psize = MMU_PAGE_16M;
unsigned int HPAGE_SHIFT;
#endif
/* These are the page-size arrays to be used when the firmware does
 * not provide one.
 */
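
/* Field meanings, for reference: shift is log2 of the page size, sllp
 * the SLB L/LP encoding, penc the HPTE LP encoding, avpnm the bits to
 * mask out of the AVPN in the HPTE, and tlbiel whether tlbiel can be
 * used for that page size.
 */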

/* Pre-POWER4 CPUs (4k pages only)
 */
struct mmu_psize_def mmu_psize_defaults_old[] = {
	[MMU_PAGE_4K] = {
		.shift	= 12,
		.sllp	= 0,
		.penc	= 0,
		.avpnm	= 0,
		.tlbiel	= 0,
	},
};

/* POWER4, GPUL, POWER5
 *
 * Support for 16MB large pages
 */
struct mmu_psize_def mmu_psize_defaults_gp[] = {
	[MMU_PAGE_4K] = {
		.shift	= 12,
		.sllp	= 0,
		.penc	= 0,
		.avpnm	= 0,
		.tlbiel	= 1,
	},
	[MMU_PAGE_16M] = {
		.shift	= 24,
		.sllp	= SLB_VSID_L,
		.penc	= 0,
		.avpnm	= 0x1UL,
		.tlbiel	= 0,
	},
};

int htab_bolt_mapping(unsigned long vstart, unsigned long vend,
		      unsigned long pstart, unsigned long mode, int psize)
{
	unsigned long vaddr, paddr;
	unsigned int step, shift;
	unsigned long tmp_mode;
	int ret = 0;

	shift = mmu_psize_defs[psize].shift;
	step = 1 << shift;

	for (vaddr = vstart, paddr = pstart; vaddr < vend;
	     vaddr += step, paddr += step) {
		unsigned long vpn, hash, hpteg;
		unsigned long vsid = get_kernel_vsid(vaddr);
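		/* the 64-bit VA is the VSID concatenated with the
		 * 28-bit offset within the 256MB segment */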
		unsigned long va = (vsid << 28) | (vaddr & 0x0fffffff);

		vpn = va >> shift;
		tmp_mode = mode;

		/* Make non-kernel text non-executable */
		if (!in_kernel_text(vaddr))
			tmp_mode = mode | HPTE_R_N;

		hash = hpt_hash(va, shift);
		hpteg = ((hash & htab_hash_mask) * HPTES_PER_GROUP);

		/* The crap below can be cleaned up once ppc_md.probe() can
		 * set up the hash callbacks; then we can just use the
		 * normal insert callback here.
		 */
#ifdef CONFIG_PPC_ISERIES
		if (systemcfg->platform == PLATFORM_ISERIES_LPAR)
			ret = iSeries_hpte_insert(hpteg, va,
						  virt_to_abs(paddr),
						  tmp_mode,
						  HPTE_V_BOLTED,
						  psize);
		else
#endif
#ifdef CONFIG_PPC_PSERIES
		if (systemcfg->platform & PLATFORM_LPAR)
			ret = pSeries_lpar_hpte_insert(hpteg, va,
						       virt_to_abs(paddr),
						       tmp_mode,
						       HPTE_V_BOLTED,
						       psize);
		else
#endif
#ifdef CONFIG_PPC_MULTIPLATFORM
			ret = native_hpte_insert(hpteg, va,
						 virt_to_abs(paddr),
						 tmp_mode, HPTE_V_BOLTED,
						 psize);
#endif
		if (ret < 0)
			break;
	}
	return ret < 0 ? ret : 0;
}
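
/* Usage sketch (illustrative; it mirrors htab_initialize() below): bolt
 * a read/write linear mapping for a physical range:
 *
 *	BUG_ON(htab_bolt_mapping(vstart, vend, pstart,
 *				 _PAGE_ACCESSED | _PAGE_DIRTY |
 *				 _PAGE_COHERENT | PP_RWXX,
 *				 mmu_linear_psize));
 */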

static int __init htab_dt_scan_page_sizes(unsigned long node,
					  const char *uname, int depth,
					  void *data)
{
	char *type = of_get_flat_dt_prop(node, "device_type", NULL);
	u32 *prop;
	unsigned long size = 0;

	/* We are scanning "cpu" nodes only */
	if (type == NULL || strcmp(type, "cpu") != 0)
		return 0;

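	/* "ibm,segment-page-sizes" is a list of cells: for each supported
	 * base page size, { shift, slbenc, count } followed by count pairs
	 * of { shift, lpenc }.  An illustrative 4K-only entry would be
	 * { 0xc, 0x0, 0x1, 0xc, 0x0 }.
	 */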
	prop = (u32 *)of_get_flat_dt_prop(node,
					  "ibm,segment-page-sizes", &size);
	if (prop != NULL) {
		DBG("Page sizes from device-tree:\n");
		size /= 4;
		cur_cpu_spec->cpu_features &= ~(CPU_FTR_16M_PAGE);
		while(size > 0) {
			unsigned int shift = prop[0];
			unsigned int slbenc = prop[1];
			unsigned int lpnum = prop[2];
			unsigned int lpenc = 0;
			struct mmu_psize_def *def;
			int idx = -1;

			size -= 3; prop += 3;
			while(size > 0 && lpnum) {
				if (prop[0] == shift)
					lpenc = prop[1];
				prop += 2; size -= 2;
				lpnum--;
			}
			switch(shift) {
			case 0xc:
				idx = MMU_PAGE_4K;
				break;
			case 0x10:
				idx = MMU_PAGE_64K;
				break;
			case 0x14:
				idx = MMU_PAGE_1M;
				break;
			case 0x18:
				idx = MMU_PAGE_16M;
				cur_cpu_spec->cpu_features |= CPU_FTR_16M_PAGE;
				break;
			case 0x22:
				idx = MMU_PAGE_16G;
				break;
			}
			if (idx < 0)
				continue;
			def = &mmu_psize_defs[idx];
			def->shift = shift;
			if (shift <= 23)
				def->avpnm = 0;
			else
				def->avpnm = (1 << (shift - 23)) - 1;
			def->sllp = slbenc;
			def->penc = lpenc;
			/* We don't know for sure what's up with tlbiel, so
			 * for now we only set it for 4K and 64K pages
			 */
			if (idx == MMU_PAGE_4K || idx == MMU_PAGE_64K)
				def->tlbiel = 1;
			else
				def->tlbiel = 0;

			DBG(" %d: shift=%02x, sllp=%04x, avpnm=%08x, "
			    "tlbiel=%d, penc=%d\n",
			    idx, shift, def->sllp, def->avpnm, def->tlbiel,
			    def->penc);
		}
		return 1;
	}
	return 0;
}

static void __init htab_init_page_sizes(void)
{
	int rc;

	/* Default to 4K pages only */
	memcpy(mmu_psize_defs, mmu_psize_defaults_old,
	       sizeof(mmu_psize_defaults_old));

	/*
	 * Try to find the available page sizes in the device-tree
	 */
	rc = of_scan_flat_dt(htab_dt_scan_page_sizes, NULL);
	if (rc != 0)  /* Found */
		goto found;

	/*
	 * Not in the device-tree; fall back on the known size list
	 * for 16M-capable GP & GR
	 */
	if ((systemcfg->platform != PLATFORM_ISERIES_LPAR) &&
	    cpu_has_feature(CPU_FTR_16M_PAGE))
		memcpy(mmu_psize_defs, mmu_psize_defaults_gp,
		       sizeof(mmu_psize_defaults_gp));
 found:
	/*
	 * Pick a size for the linear mapping. Currently, we only support
	 * 16M, 1M and 4K, which is the default
	 */
	if (mmu_psize_defs[MMU_PAGE_16M].shift)
		mmu_linear_psize = MMU_PAGE_16M;
	else if (mmu_psize_defs[MMU_PAGE_1M].shift)
		mmu_linear_psize = MMU_PAGE_1M;

	/*
	 * Pick a size for the ordinary pages. Default is 4K; we support
	 * 64K if cache-inhibited large pages are supported by the
	 * processor
	 */
#ifdef CONFIG_PPC_64K_PAGES
	if (mmu_psize_defs[MMU_PAGE_64K].shift &&
	    cpu_has_feature(CPU_FTR_CI_LARGE_PAGE))
		mmu_virtual_psize = MMU_PAGE_64K;
#endif

	printk(KERN_INFO "Page orders: linear mapping = %d, others = %d\n",
	       mmu_psize_defs[mmu_linear_psize].shift,
	       mmu_psize_defs[mmu_virtual_psize].shift);

#ifdef CONFIG_HUGETLB_PAGE
	/* Init large page size. Currently, we pick 16M or 1M depending
	 * on what is available
	 */
	if (mmu_psize_defs[MMU_PAGE_16M].shift)
		mmu_huge_psize = MMU_PAGE_16M;
	/* With 4k/4-level pagetables, we can't (for now) cope with a
	 * huge page size < PMD_SIZE */
	else if (mmu_psize_defs[MMU_PAGE_1M].shift)
		mmu_huge_psize = MMU_PAGE_1M;

	/* Calculate HPAGE_SHIFT and sanity check it */
	if (mmu_psize_defs[mmu_huge_psize].shift > MIN_HUGEPTE_SHIFT &&
	    mmu_psize_defs[mmu_huge_psize].shift < SID_SHIFT)
		HPAGE_SHIFT = mmu_psize_defs[mmu_huge_psize].shift;
	else
		HPAGE_SHIFT = 0; /* No huge pages dude ! */
#endif /* CONFIG_HUGETLB_PAGE */
}
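
/* For reference: on a 16M-capable GP/GR machine the above typically
 * selects a 16M linear mapping with 4K ordinary pages; 64K ordinary
 * pages require both CONFIG_PPC_64K_PAGES and the CI-large-page
 * feature.
 */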

static int __init htab_dt_scan_pftsize(unsigned long node,
				       const char *uname, int depth,
				       void *data)
{
	char *type = of_get_flat_dt_prop(node, "device_type", NULL);
	u32 *prop;

	/* We are scanning "cpu" nodes only */
	if (type == NULL || strcmp(type, "cpu") != 0)
		return 0;

	prop = (u32 *)of_get_flat_dt_prop(node, "ibm,pft-size", NULL);
	if (prop != NULL) {
		/* pft_size[0] is the NUMA CEC cookie */
		ppc64_pft_size = prop[1];
		return 1;
	}
	return 0;
}

static unsigned long __init htab_get_table_size(void)
{
	unsigned long rnd_mem_size, pteg_count;

	/* If the hash size isn't already provided by the platform, try to
	 * retrieve it from the device-tree. If it's not there either,
	 * calculate it now based on the total RAM size
	 */
	if (ppc64_pft_size == 0)
		of_scan_flat_dt(htab_dt_scan_pftsize, NULL);
	if (ppc64_pft_size)
		return 1UL << ppc64_pft_size;

	/* round mem_size up to next power of 2 */
	rnd_mem_size = 1UL << __ilog2(systemcfg->physicalMemorySize);
	if (rnd_mem_size < systemcfg->physicalMemorySize)
		rnd_mem_size <<= 1;

	/* # pages / 2 */
	pteg_count = max(rnd_mem_size >> (12 + 1), 1UL << 11);

	return pteg_count << 7;
}
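
/* Worked example (illustrative): with 1GB of RAM and no "ibm,pft-size"
 * property, rnd_mem_size = 2^30, pteg_count = max(2^30 >> 13, 2^11) =
 * 2^17, and the table is 2^17 << 7 bytes = 16MB: one 128-byte PTEG for
 * every two 4K pages.
 */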

#ifdef CONFIG_MEMORY_HOTPLUG
void create_section_mapping(unsigned long start, unsigned long end)
{
	BUG_ON(htab_bolt_mapping(start, end, start,
		_PAGE_ACCESSED | _PAGE_DIRTY | _PAGE_COHERENT | PP_RWXX,
		mmu_linear_psize));
}
#endif /* CONFIG_MEMORY_HOTPLUG */

void __init htab_initialize(void)
{
	unsigned long table, htab_size_bytes;
	unsigned long pteg_count;
	unsigned long mode_rw;
	unsigned long base = 0, size = 0;
	int i;

	extern unsigned long tce_alloc_start, tce_alloc_end;

	DBG(" -> htab_initialize()\n");

	/* Initialize page sizes */
	htab_init_page_sizes();

	/*
	 * Calculate the required size of the htab.  We want the number of
	 * PTEGs to equal one half the number of real pages.
	 */
	htab_size_bytes = htab_get_table_size();
	pteg_count = htab_size_bytes >> 7;

	htab_hash_mask = pteg_count - 1;

	if (systemcfg->platform & PLATFORM_LPAR) {
		/* Using a hypervisor which owns the htab */
		htab_address = NULL;
		_SDR1 = 0;
	} else {
		/* Find storage for the HPT.  Must be contiguous in
		 * the absolute address space.
		 */
		table = lmb_alloc(htab_size_bytes, htab_size_bytes);
		BUG_ON(table == 0);

		DBG("Hash table allocated at %lx, size: %lx\n", table,
		    htab_size_bytes);

		htab_address = abs_to_virt(table);

		/* htab absolute addr + encoded htabsize */
		_SDR1 = table + __ilog2(pteg_count) - 11;
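		/* e.g. a 16MB table has 2^17 PTEGs, so the encoded size
		 * is __ilog2(2^17) - 11 = 6; the table is aligned to its
		 * size, so the addition just fills in the low bits */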

		/* Initialize the HPT with no entries */
		memset((void *)table, 0, htab_size_bytes);
	}

	mode_rw = _PAGE_ACCESSED | _PAGE_DIRTY | _PAGE_COHERENT | PP_RWXX;

	/* On U3 based machines, we need to reserve the DART area and
	 * _NOT_ map it to avoid cache paradoxes, as it's remapped
	 * non-cacheable later on
	 */

	/* Create the bolted linear mapping in the hash table */
	for (i=0; i < lmb.memory.cnt; i++) {
		base = lmb.memory.region[i].base + KERNELBASE;
		size = lmb.memory.region[i].size;

		DBG("creating mapping for region: %lx : %lx\n", base, size);

#ifdef CONFIG_U3_DART
		/* Do not map the DART space. Fortunately, it will be aligned
		 * in such a way that it will not cross two lmb regions and
		 * will fit within a single 16MB page.
		 * The DART space is assumed to be a full 16MB region even if
		 * we only use 2MB of that space. We will use more of it later
		 * for AGP GART. We have to use a full 16MB large page.
		 */
		DBG("DART base: %lx\n", dart_tablebase);

		if (dart_tablebase != 0 && dart_tablebase >= base
		    && dart_tablebase < (base + size)) {
			if (base != dart_tablebase)
				BUG_ON(htab_bolt_mapping(base, dart_tablebase,
							 base, mode_rw,
							 mmu_linear_psize));
			if ((base + size) > (dart_tablebase + 16*MB))
				BUG_ON(htab_bolt_mapping(dart_tablebase+16*MB,
							 base + size,
							 dart_tablebase+16*MB,
							 mode_rw,
							 mmu_linear_psize));
			continue;
		}
#endif /* CONFIG_U3_DART */
		BUG_ON(htab_bolt_mapping(base, base + size, base,
					 mode_rw, mmu_linear_psize));
	}

	/*
	 * If we have a memory_limit and we've allocated TCEs then we need to
	 * explicitly map the TCE area at the top of RAM. We also cope with the
	 * case that the TCEs start below memory_limit.
	 * tce_alloc_start/end are 16MB aligned so the mapping should work
	 * for either 4K or 16MB pages.
	 */
	if (tce_alloc_start) {
		tce_alloc_start += KERNELBASE;
		tce_alloc_end += KERNELBASE;

		if (base + size >= tce_alloc_start)
			tce_alloc_start = base + size + 1;

		BUG_ON(htab_bolt_mapping(tce_alloc_start, tce_alloc_end,
					 tce_alloc_start, mode_rw,
					 mmu_linear_psize));
	}

	DBG(" <- htab_initialize()\n");
}
#undef KB
#undef MB

/*
 * Called by asm hashtable.S for doing lazy icache flush
 */
unsigned int hash_page_do_lazy_icache(unsigned int pp, pte_t pte, int trap)
{
	struct page *page;

	if (!pfn_valid(pte_pfn(pte)))
		return pp;

	page = pte_page(pte);

	/* page needs an icache flush: do it now on an instruction
	 * fault (0x400), otherwise mark the HPTE no-execute so we
	 * fault again and flush on first execution */
	if (!test_bit(PG_arch_1, &page->flags) && !PageReserved(page)) {
		if (trap == 0x400) {
			__flush_dcache_icache(page_address(page));
			set_bit(PG_arch_1, &page->flags);
		} else
			pp |= HPTE_R_N;
	}
	return pp;
}

/* Result code is:
 *  0 - handled
 *  1 - normal page fault
 * -1 - critical hash insertion error
 */
int hash_page(unsigned long ea, unsigned long access, unsigned long trap)
{
	void *pgdir;
	unsigned long vsid;
	struct mm_struct *mm;
	pte_t *ptep;
	cpumask_t tmp;
	int rc, user_region = 0, local = 0;

	DBG_LOW("hash_page(ea=%016lx, access=%lx, trap=%lx)\n",
		ea, access, trap);

	if ((ea & ~REGION_MASK) >= PGTABLE_RANGE) {
		DBG_LOW(" out of pgtable range !\n");
		return 1;
	}

	/* Get region & vsid */
	switch (REGION_ID(ea)) {
	case USER_REGION_ID:
		user_region = 1;
		mm = current->mm;
		if (!mm) {
			DBG_LOW(" user region with no mm !\n");
			return 1;
		}
		vsid = get_vsid(mm->context.id, ea);
		break;
	case VMALLOC_REGION_ID:
		mm = &init_mm;
		vsid = get_kernel_vsid(ea);
		break;
	default:
		/* Not a valid range
		 * Send the problem up to do_page_fault
		 */
		return 1;
	}
	DBG_LOW(" mm=%p, mm->pgdir=%p, vsid=%016lx\n", mm, mm->pgd, vsid);

	/* Get pgdir */
	pgdir = mm->pgd;
	if (pgdir == NULL)
		return 1;

	/* Check CPU locality */
	tmp = cpumask_of_cpu(smp_processor_id());
	if (user_region && cpus_equal(mm->cpu_vm_mask, tmp))
		local = 1;

	/* Handle hugepage regions */
	if (unlikely(in_hugepage_area(mm->context, ea))) {
		DBG_LOW(" -> huge page !\n");
		return hash_huge_page(mm, access, ea, vsid, local);
	}

	/* Get PTE and page size from page tables */
	ptep = find_linux_pte(pgdir, ea);
	if (ptep == NULL || !pte_present(*ptep)) {
		DBG_LOW(" no PTE !\n");
		return 1;
	}

#ifndef CONFIG_PPC_64K_PAGES
	DBG_LOW(" i-pte: %016lx\n", pte_val(*ptep));
#else
	DBG_LOW(" i-pte: %016lx %016lx\n", pte_val(*ptep),
		pte_val(*(ptep + PTRS_PER_PTE)));
#endif
	/* Pre-check access permissions (they will be re-checked
	 * atomically in __hash_page_XX); this pre-check is just a
	 * fast path
	 */
	if (access & ~pte_val(*ptep)) {
		DBG_LOW(" no access !\n");
		return 1;
	}

	/* Do actual hashing */
#ifndef CONFIG_PPC_64K_PAGES
	rc = __hash_page_4K(ea, access, vsid, ptep, trap, local);
#else
	if (mmu_virtual_psize == MMU_PAGE_64K)
		rc = __hash_page_64K(ea, access, vsid, ptep, trap, local);
	else
		rc = __hash_page_4K(ea, access, vsid, ptep, trap, local);
#endif /* CONFIG_PPC_64K_PAGES */

#ifndef CONFIG_PPC_64K_PAGES
	DBG_LOW(" o-pte: %016lx\n", pte_val(*ptep));
#else
	DBG_LOW(" o-pte: %016lx %016lx\n", pte_val(*ptep),
		pte_val(*(ptep + PTRS_PER_PTE)));
#endif
	DBG_LOW(" -> rc=%d\n", rc);
	return rc;
}
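
/* For reference: the low-level asm hash-fault path returns straight to
 * the interrupted context on rc == 0, falls back to the normal
 * do_page_fault() path on rc > 0, and ends up in low_hash_fault()
 * below on rc < 0.
 */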

void hash_preload(struct mm_struct *mm, unsigned long ea,
		  unsigned long access, unsigned long trap)
{
	unsigned long vsid;
	void *pgdir;
	pte_t *ptep;
	cpumask_t mask;
	unsigned long flags;
	int local = 0;

	/* We don't want huge pages prefaulted for now
	 */
	if (unlikely(in_hugepage_area(mm->context, ea)))
		return;

	DBG_LOW("hash_preload(mm=%p, mm->pgdir=%p, ea=%016lx, access=%lx,"
		" trap=%lx)\n", mm, mm->pgd, ea, access, trap);

	/* Get PTE, VSID, access mask */
	pgdir = mm->pgd;
	if (pgdir == NULL)
		return;
	ptep = find_linux_pte(pgdir, ea);
	if (!ptep)
		return;
	vsid = get_vsid(mm->context.id, ea);

	/* Hash it in */
	local_irq_save(flags);
	mask = cpumask_of_cpu(smp_processor_id());
	if (cpus_equal(mm->cpu_vm_mask, mask))
		local = 1;
#ifndef CONFIG_PPC_64K_PAGES
	__hash_page_4K(ea, access, vsid, ptep, trap, local);
#else
	if (mmu_virtual_psize == MMU_PAGE_64K)
		__hash_page_64K(ea, access, vsid, ptep, trap, local);
	else
		__hash_page_4K(ea, access, vsid, ptep, trap, local);
#endif /* CONFIG_PPC_64K_PAGES */
	local_irq_restore(flags);
}

void flush_hash_page(unsigned long va, real_pte_t pte, int psize, int local)
{
	unsigned long hash, index, shift, hidx, slot;

	DBG_LOW("flush_hash_page(va=%016lx)\n", va);
	pte_iterate_hashed_subpages(pte, psize, va, index, shift) {
		hash = hpt_hash(va, shift);
		hidx = __rpte_to_hidx(pte, index);
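		/* an HPTE in the secondary group is found at the
		 * ones-complement of the primary hash */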
		if (hidx & _PTEIDX_SECONDARY)
			hash = ~hash;
		slot = (hash & htab_hash_mask) * HPTES_PER_GROUP;
		slot += hidx & _PTEIDX_GROUP_IX;
		DBG_LOW(" sub %ld: slot=%lx, hidx=%lx\n", index, slot, hidx);
		ppc_md.hpte_invalidate(slot, va, psize, local);
	} pte_iterate_hashed_end();
}

void flush_hash_range(unsigned long number, int local)
{
	if (ppc_md.flush_hash_range)
		ppc_md.flush_hash_range(number, local);
	else {
		int i;
		struct ppc64_tlb_batch *batch =
			&__get_cpu_var(ppc64_tlb_batch);

		for (i = 0; i < number; i++)
			flush_hash_page(batch->vaddr[i], batch->pte[i],
					batch->psize, local);
	}
}

static inline void make_bl(unsigned int *insn_addr, void *func)
{
	unsigned long funcp = *((unsigned long *)func);
	int offset = funcp - (unsigned long)insn_addr;

	*insn_addr = (unsigned int)(0x48000001 | (offset & 0x03fffffc));
	flush_icache_range((unsigned long)insn_addr,
			   (unsigned long)insn_addr + 4);
}
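
/* Worked example (illustrative): 0x48000001 is "bl" (opcode 18 with
 * LK=1), and 0x03fffffc masks the sign-extended, word-aligned 26-bit
 * displacement, so a target 0x100 bytes ahead assembles to 0x48000101.
 * On ppc64 the function pointer is an ELF function descriptor, hence
 * the dereference to fetch the real entry point.
 */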

/*
 * low_hash_fault is called when the low-level hash code failed to
 * insert a PTE due to a hypervisor error
 */
void low_hash_fault(struct pt_regs *regs, unsigned long address)
{
	if (user_mode(regs)) {
		siginfo_t info;

		info.si_signo = SIGBUS;
		info.si_errno = 0;
		info.si_code = BUS_ADRERR;
		info.si_addr = (void __user *)address;
		force_sig_info(SIGBUS, &info, current);
		return;
	}
	bad_page_fault(regs, address, SIGBUS);
}

void __init htab_finish_init(void)
{
	extern unsigned int *htab_call_hpte_insert1;
	extern unsigned int *htab_call_hpte_insert2;
	extern unsigned int *htab_call_hpte_remove;
	extern unsigned int *htab_call_hpte_updatepp;

#ifdef CONFIG_PPC_64K_PAGES
	extern unsigned int *ht64_call_hpte_insert1;
	extern unsigned int *ht64_call_hpte_insert2;
	extern unsigned int *ht64_call_hpte_remove;
	extern unsigned int *ht64_call_hpte_updatepp;

	make_bl(ht64_call_hpte_insert1, ppc_md.hpte_insert);
	make_bl(ht64_call_hpte_insert2, ppc_md.hpte_insert);
	make_bl(ht64_call_hpte_remove, ppc_md.hpte_remove);
	make_bl(ht64_call_hpte_updatepp, ppc_md.hpte_updatepp);
#endif /* CONFIG_PPC_64K_PAGES */

	make_bl(htab_call_hpte_insert1, ppc_md.hpte_insert);
	make_bl(htab_call_hpte_insert2, ppc_md.hpte_insert);
	make_bl(htab_call_hpte_remove, ppc_md.hpte_remove);
	make_bl(htab_call_hpte_updatepp, ppc_md.hpte_updatepp);
}