/* $Id: spitfire.h,v 1.18 2001/11/29 16:42:10 kanoj Exp $
 * spitfire.h: SpitFire/BlackBird/Cheetah inline MMU operations.
 *
 * Copyright (C) 1996 David S. Miller (davem@caip.rutgers.edu)
 */

#ifndef _SPARC64_SPITFIRE_H
#define _SPARC64_SPITFIRE_H

#include <asm/asi.h>

/* The following register addresses are accessible via ASI_DMMU
 * and ASI_IMMU; that is, there is a distinct and unique copy of
 * each of these registers for each TLB.
 */
#define TSB_TAG_TARGET          0x0000000000000000 /* All chips                         */
#define TLB_SFSR                0x0000000000000018 /* All chips                         */
#define TSB_REG                 0x0000000000000028 /* All chips                         */
#define TLB_TAG_ACCESS          0x0000000000000030 /* All chips                         */
#define VIRT_WATCHPOINT         0x0000000000000038 /* All chips                         */
#define PHYS_WATCHPOINT         0x0000000000000040 /* All chips                         */
#define TSB_EXTENSION_P         0x0000000000000048 /* Ultra-III and later               */
#define TSB_EXTENSION_S         0x0000000000000050 /* Ultra-III and later, D-TLB only   */
#define TSB_EXTENSION_N         0x0000000000000058 /* Ultra-III and later               */
#define TLB_TAG_ACCESS_EXT      0x0000000000000060 /* Ultra-III+ and later              */
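
/* Usage sketch (illustrative, not part of the original header): each of
 * the registers above is read with ldxa and written with stxa at the
 * given offset, once via ASI_IMMU for the I-TLB copy and once via
 * ASI_DMMU for the D-TLB copy.  Reading the D-MMU tag access register,
 * for example, could look like:
 *
 *	unsigned long tag_access;
 *
 *	__asm__ __volatile__("ldxa      [%1] %2, %0"
 *			     : "=r" (tag_access)
 *			     : "r" (TLB_TAG_ACCESS), "i" (ASI_DMMU));
 */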

/* These registers only exist as one entity, and are accessed
 * via ASI_DMMU only.
 */
#define PRIMARY_CONTEXT         0x0000000000000008
#define SECONDARY_CONTEXT       0x0000000000000010
#define DMMU_SFAR               0x0000000000000020
#define VIRT_WATCHPOINT         0x0000000000000038
#define PHYS_WATCHPOINT         0x0000000000000040
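
/* Usage sketch (illustrative only): these single-instance registers are
 * likewise accessed with ldxa/stxa, but only through ASI_DMMU.  Reading
 * the current primary context number might look like:
 *
 *	unsigned long ctx;
 *
 *	__asm__ __volatile__("ldxa      [%1] %2, %0"
 *			     : "=r" (ctx)
 *			     : "r" (PRIMARY_CONTEXT), "i" (ASI_DMMU));
 */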

#define SPITFIRE_HIGHEST_LOCKED_TLBENT  (64 - 1)
#define CHEETAH_HIGHEST_LOCKED_TLBENT   (16 - 1)

#define L1DCACHE_SIZE           0x4000

#ifndef __ASSEMBLY__

enum ultra_tlb_layout {
        spitfire = 0,
        cheetah = 1,
        cheetah_plus = 2,
};

extern enum ultra_tlb_layout tlb_type;

extern int cheetah_pcache_forced_on;
extern void cheetah_enable_pcache(void);

#define sparc64_highest_locked_tlbent() \
        (tlb_type == spitfire ? \
         SPITFIRE_HIGHEST_LOCKED_TLBENT : \
         CHEETAH_HIGHEST_LOCKED_TLBENT)
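
/* Example (a sketch, not part of the original header): callers that need
 * the index of the last locked TLB slot on the running chip can use the
 * helper above, e.g. to inspect that entry for debugging:
 *
 *	unsigned long data;
 *	int slot = sparc64_highest_locked_tlbent();
 *
 *	if (tlb_type == spitfire)
 *		data = spitfire_get_dtlb_data(slot);
 */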

static __inline__ unsigned long spitfire_get_isfsr(void)
{
        unsigned long ret;

        __asm__ __volatile__("ldxa      [%1] %2, %0"
                             : "=r" (ret)
                             : "r" (TLB_SFSR), "i" (ASI_IMMU));
        return ret;
}

static __inline__ unsigned long spitfire_get_dsfsr(void)
{
        unsigned long ret;

        __asm__ __volatile__("ldxa      [%1] %2, %0"
                             : "=r" (ret)
                             : "r" (TLB_SFSR), "i" (ASI_DMMU));
        return ret;
}

static __inline__ unsigned long spitfire_get_sfar(void)
{
        unsigned long ret;

        __asm__ __volatile__("ldxa      [%1] %2, %0"
                             : "=r" (ret)
                             : "r" (DMMU_SFAR), "i" (ASI_DMMU));
        return ret;
}

static __inline__ void spitfire_put_isfsr(unsigned long sfsr)
{
        __asm__ __volatile__("stxa      %0, [%1] %2\n\t"
                             "membar    #Sync"
                             : /* no outputs */
                             : "r" (sfsr), "r" (TLB_SFSR), "i" (ASI_IMMU));
}

static __inline__ void spitfire_put_dsfsr(unsigned long sfsr)
{
        __asm__ __volatile__("stxa      %0, [%1] %2\n\t"
                             "membar    #Sync"
                             : /* no outputs */
                             : "r" (sfsr), "r" (TLB_SFSR), "i" (ASI_DMMU));
}

/* The data cache is write-through, so this just invalidates the
 * specified line.
 */
static __inline__ void spitfire_put_dcache_tag(unsigned long addr, unsigned long tag)
{
        __asm__ __volatile__("stxa      %0, [%1] %2\n\t"
                             "membar    #Sync"
                             : /* No outputs */
                             : "r" (tag), "r" (addr), "i" (ASI_DCACHE_TAG));
}
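
/* Example (illustrative sketch, not part of the original header): since a
 * zero tag simply invalidates a line, the whole L1 D-cache can be tossed
 * by walking the tag array.  Assuming 32-byte lines:
 *
 *	unsigned long va;
 *
 *	for (va = 0; va < L1DCACHE_SIZE; va += 32)
 *		spitfire_put_dcache_tag(va, 0x0);
 */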

/* The instruction cache lines are flushed with this, but note that
 * this does not flush the pipeline.  It is possible for a line to
 * get flushed but for stale instructions to still be in the pipeline;
 * a flush instruction (to any address) is sufficient to handle
 * this issue after the line is invalidated.
 */
static __inline__ void spitfire_put_icache_tag(unsigned long addr, unsigned long tag)
{
        __asm__ __volatile__("stxa      %0, [%1] %2\n\t"
                             "membar    #Sync"
                             : /* No outputs */
                             : "r" (tag), "r" (addr), "i" (ASI_IC_TAG));
}
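
/* Example (a sketch, not from the original file): after invalidating an
 * I-cache line, executing a flush instruction on any valid address drains
 * the pipeline of stale instructions.  %g6 holds current_thread_info() in
 * this kernel, so it is a convenient known-good address:
 *
 *	spitfire_put_icache_tag(addr, 0x0);
 *	__asm__ __volatile__("flush     %%g6" : : : "memory");
 */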

static __inline__ unsigned long spitfire_get_dtlb_data(int entry)
{
        unsigned long data;

        __asm__ __volatile__("ldxa      [%1] %2, %0"
                             : "=r" (data)
                             : "r" (entry << 3), "i" (ASI_DTLB_DATA_ACCESS));

        /* Clear TTE diag bits. */
        data &= ~0x0003fe0000000000UL;

        return data;
}

static __inline__ unsigned long spitfire_get_dtlb_tag(int entry)
{
        unsigned long tag;

        __asm__ __volatile__("ldxa      [%1] %2, %0"
                             : "=r" (tag)
                             : "r" (entry << 3), "i" (ASI_DTLB_TAG_READ));
        return tag;
}

static __inline__ void spitfire_put_dtlb_data(int entry, unsigned long data)
{
        __asm__ __volatile__("stxa      %0, [%1] %2\n\t"
                             "membar    #Sync"
                             : /* No outputs */
                             : "r" (data), "r" (entry << 3),
                               "i" (ASI_DTLB_DATA_ACCESS));
}

static __inline__ unsigned long spitfire_get_itlb_data(int entry)
{
        unsigned long data;

        __asm__ __volatile__("ldxa      [%1] %2, %0"
                             : "=r" (data)
                             : "r" (entry << 3), "i" (ASI_ITLB_DATA_ACCESS));

        /* Clear TTE diag bits. */
        data &= ~0x0003fe0000000000UL;

        return data;
}

static __inline__ unsigned long spitfire_get_itlb_tag(int entry)
{
        unsigned long tag;

        __asm__ __volatile__("ldxa      [%1] %2, %0"
                             : "=r" (tag)
                             : "r" (entry << 3), "i" (ASI_ITLB_TAG_READ));
        return tag;
}

static __inline__ void spitfire_put_itlb_data(int entry, unsigned long data)
{
        __asm__ __volatile__("stxa      %0, [%1] %2\n\t"
                             "membar    #Sync"
                             : /* No outputs */
                             : "r" (data), "r" (entry << 3),
                               "i" (ASI_ITLB_DATA_ACCESS));
}

/* Spitfire hardware assisted TLB flushes. */
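
/* A note on the demap addresses used below, as best understood from the
 * UltraSPARC MMU demap operation format: bits 5:4 of the store address
 * select the context (0 = primary, 1 = secondary, 2 = nucleus) and the
 * bits above them select the demap type (0 = demap page, 1 = demap
 * context, and on Cheetah 2 = demap all).  So 0x40/0x50/0x60 flush a
 * whole context, "page | 0x00/0x10/0x20" flushes a single page in that
 * context, and 0x80 is Cheetah's "flush everything not locked".
 */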

/* Context level flushes. */
static __inline__ void spitfire_flush_dtlb_primary_context(void)
{
        __asm__ __volatile__("stxa      %%g0, [%0] %1\n\t"
                             "membar    #Sync"
                             : /* No outputs */
                             : "r" (0x40), "i" (ASI_DMMU_DEMAP));
}

static __inline__ void spitfire_flush_itlb_primary_context(void)
{
        __asm__ __volatile__("stxa      %%g0, [%0] %1\n\t"
                             "membar    #Sync"
                             : /* No outputs */
                             : "r" (0x40), "i" (ASI_IMMU_DEMAP));
}

static __inline__ void spitfire_flush_dtlb_secondary_context(void)
{
        __asm__ __volatile__("stxa      %%g0, [%0] %1\n\t"
                             "membar    #Sync"
                             : /* No outputs */
                             : "r" (0x50), "i" (ASI_DMMU_DEMAP));
}

static __inline__ void spitfire_flush_itlb_secondary_context(void)
{
        __asm__ __volatile__("stxa      %%g0, [%0] %1\n\t"
                             "membar    #Sync"
                             : /* No outputs */
                             : "r" (0x50), "i" (ASI_IMMU_DEMAP));
}

static __inline__ void spitfire_flush_dtlb_nucleus_context(void)
{
        __asm__ __volatile__("stxa      %%g0, [%0] %1\n\t"
                             "membar    #Sync"
                             : /* No outputs */
                             : "r" (0x60), "i" (ASI_DMMU_DEMAP));
}

static __inline__ void spitfire_flush_itlb_nucleus_context(void)
{
        __asm__ __volatile__("stxa      %%g0, [%0] %1\n\t"
                             "membar    #Sync"
                             : /* No outputs */
                             : "r" (0x60), "i" (ASI_IMMU_DEMAP));
}

/* Page level flushes. */
static __inline__ void spitfire_flush_dtlb_primary_page(unsigned long page)
{
        __asm__ __volatile__("stxa      %%g0, [%0] %1\n\t"
                             "membar    #Sync"
                             : /* No outputs */
                             : "r" (page), "i" (ASI_DMMU_DEMAP));
}

static __inline__ void spitfire_flush_itlb_primary_page(unsigned long page)
{
        __asm__ __volatile__("stxa      %%g0, [%0] %1\n\t"
                             "membar    #Sync"
                             : /* No outputs */
                             : "r" (page), "i" (ASI_IMMU_DEMAP));
}

static __inline__ void spitfire_flush_dtlb_secondary_page(unsigned long page)
{
        __asm__ __volatile__("stxa      %%g0, [%0] %1\n\t"
                             "membar    #Sync"
                             : /* No outputs */
                             : "r" (page | 0x10), "i" (ASI_DMMU_DEMAP));
}

static __inline__ void spitfire_flush_itlb_secondary_page(unsigned long page)
{
        __asm__ __volatile__("stxa      %%g0, [%0] %1\n\t"
                             "membar    #Sync"
                             : /* No outputs */
                             : "r" (page | 0x10), "i" (ASI_IMMU_DEMAP));
}

static __inline__ void spitfire_flush_dtlb_nucleus_page(unsigned long page)
{
        __asm__ __volatile__("stxa      %%g0, [%0] %1\n\t"
                             "membar    #Sync"
                             : /* No outputs */
                             : "r" (page | 0x20), "i" (ASI_DMMU_DEMAP));
}

static __inline__ void spitfire_flush_itlb_nucleus_page(unsigned long page)
{
        __asm__ __volatile__("stxa      %%g0, [%0] %1\n\t"
                             "membar    #Sync"
                             : /* No outputs */
                             : "r" (page | 0x20), "i" (ASI_IMMU_DEMAP));
}
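
/* Usage sketch (illustrative only): "page" is the page-aligned virtual
 * address, and the low bits select the context as noted above.  Dropping
 * a nucleus (kernel) mapping for some virtual address addr from both
 * TLBs might look like:
 *
 *	unsigned long va = addr & PAGE_MASK;
 *
 *	spitfire_flush_dtlb_nucleus_page(va);
 *	spitfire_flush_itlb_nucleus_page(va);
 */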

/* Cheetah has "all non-locked" tlb flushes. */
static __inline__ void cheetah_flush_dtlb_all(void)
{
        __asm__ __volatile__("stxa      %%g0, [%0] %1\n\t"
                             "membar    #Sync"
                             : /* No outputs */
                             : "r" (0x80), "i" (ASI_DMMU_DEMAP));
}

static __inline__ void cheetah_flush_itlb_all(void)
{
        __asm__ __volatile__("stxa      %%g0, [%0] %1\n\t"
                             "membar    #Sync"
                             : /* No outputs */
                             : "r" (0x80), "i" (ASI_IMMU_DEMAP));
}

/* Cheetah has a 4-tlb layout so direct access is a bit different.
 * The first two TLBs are fully associative, hold 16 entries, and are
 * used only for locked and >8K sized translations.  One exists for
 * data accesses and one for instruction accesses.
 *
 * The third TLB is for data accesses to 8K non-locked translations, is
 * 2 way associative, and holds 512 entries.  The fourth TLB is for
 * instruction accesses to 8K non-locked translations, is 2 way
 * associative, and holds 128 entries.
 *
 * Cheetah has some bug where bogus data can be returned from
 * ASI_{D,I}TLB_DATA_ACCESS loads; doing the load twice fixes
 * the problem for me. -DaveM
 */
static __inline__ unsigned long cheetah_get_ldtlb_data(int entry)
{
        unsigned long data;

        __asm__ __volatile__("ldxa      [%1] %2, %%g0\n\t"
                             "ldxa      [%1] %2, %0"
                             : "=r" (data)
                             : "r" ((0 << 16) | (entry << 3)),
                             "i" (ASI_DTLB_DATA_ACCESS));

        return data;
}

static __inline__ unsigned long cheetah_get_litlb_data(int entry)
{
        unsigned long data;

        __asm__ __volatile__("ldxa      [%1] %2, %%g0\n\t"
                             "ldxa      [%1] %2, %0"
                             : "=r" (data)
                             : "r" ((0 << 16) | (entry << 3)),
                             "i" (ASI_ITLB_DATA_ACCESS));

        return data;
}

static __inline__ unsigned long cheetah_get_ldtlb_tag(int entry)
{
        unsigned long tag;

        __asm__ __volatile__("ldxa      [%1] %2, %0"
                             : "=r" (tag)
                             : "r" ((0 << 16) | (entry << 3)),
                             "i" (ASI_DTLB_TAG_READ));

        return tag;
}

static __inline__ unsigned long cheetah_get_litlb_tag(int entry)
{
        unsigned long tag;

        __asm__ __volatile__("ldxa      [%1] %2, %0"
                             : "=r" (tag)
                             : "r" ((0 << 16) | (entry << 3)),
                             "i" (ASI_ITLB_TAG_READ));

        return tag;
}

static __inline__ void cheetah_put_ldtlb_data(int entry, unsigned long data)
{
        __asm__ __volatile__("stxa      %0, [%1] %2\n\t"
                             "membar    #Sync"
                             : /* No outputs */
                             : "r" (data),
                               "r" ((0 << 16) | (entry << 3)),
                               "i" (ASI_DTLB_DATA_ACCESS));
}

static __inline__ void cheetah_put_litlb_data(int entry, unsigned long data)
{
        __asm__ __volatile__("stxa      %0, [%1] %2\n\t"
                             "membar    #Sync"
                             : /* No outputs */
                             : "r" (data),
                               "r" ((0 << 16) | (entry << 3)),
                               "i" (ASI_ITLB_DATA_ACCESS));
}
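
/* In the diagnostic ASI address used above, bits 17:16 select which of
 * the MMU's TLBs is accessed (0 for the small locked TLB, 2 for the
 * large 2-way TLB as used further below) and the entry index is shifted
 * left by 3, i.e. each entry occupies 8 bytes of that address space.
 * A sketch (not part of the original header) of dumping the locked
 * D-TLB for debugging:
 *
 *	int i;
 *
 *	for (i = 0; i <= CHEETAH_HIGHEST_LOCKED_TLBENT; i++)
 *		printk("ldtlb[%d]: tag[%016lx] data[%016lx]\n",
 *		       i, cheetah_get_ldtlb_tag(i), cheetah_get_ldtlb_data(i));
 */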

static __inline__ unsigned long cheetah_get_dtlb_data(int entry, int tlb)
{
        unsigned long data;

        __asm__ __volatile__("ldxa      [%1] %2, %%g0\n\t"
                             "ldxa      [%1] %2, %0"
                             : "=r" (data)
                             : "r" ((tlb << 16) | (entry << 3)), "i" (ASI_DTLB_DATA_ACCESS));

        return data;
}

static __inline__ unsigned long cheetah_get_dtlb_tag(int entry, int tlb)
{
        unsigned long tag;

        __asm__ __volatile__("ldxa      [%1] %2, %0"
                             : "=r" (tag)
                             : "r" ((tlb << 16) | (entry << 3)), "i" (ASI_DTLB_TAG_READ));
        return tag;
}

static __inline__ void cheetah_put_dtlb_data(int entry, unsigned long data, int tlb)
{
        __asm__ __volatile__("stxa      %0, [%1] %2\n\t"
                             "membar    #Sync"
                             : /* No outputs */
                             : "r" (data),
                               "r" ((tlb << 16) | (entry << 3)),
                               "i" (ASI_DTLB_DATA_ACCESS));
}

static __inline__ unsigned long cheetah_get_itlb_data(int entry)
{
        unsigned long data;

        __asm__ __volatile__("ldxa      [%1] %2, %%g0\n\t"
                             "ldxa      [%1] %2, %0"
                             : "=r" (data)
                             : "r" ((2 << 16) | (entry << 3)),
                               "i" (ASI_ITLB_DATA_ACCESS));

        return data;
}

static __inline__ unsigned long cheetah_get_itlb_tag(int entry)
{
        unsigned long tag;

        __asm__ __volatile__("ldxa      [%1] %2, %0"
                             : "=r" (tag)
                             : "r" ((2 << 16) | (entry << 3)), "i" (ASI_ITLB_TAG_READ));
        return tag;
}

static __inline__ void cheetah_put_itlb_data(int entry, unsigned long data)
{
        __asm__ __volatile__("stxa      %0, [%1] %2\n\t"
                             "membar    #Sync"
                             : /* No outputs */
                             : "r" (data), "r" ((2 << 16) | (entry << 3)),
                               "i" (ASI_ITLB_DATA_ACCESS));
}

#endif /* !(__ASSEMBLY__) */

#endif /* !(_SPARC64_SPITFIRE_H) */