arch/blackfin/kernel/cplb-mpu/cplbmgr.c
/*
 *               Blackfin CPLB exception handling.
 *               Copyright 2004-2007 Analog Devices Inc.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, see the file COPYING, or write
 * to the Free Software Foundation, Inc.,
 * 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
 */
#include <linux/module.h>
#include <linux/mm.h>

#include <asm/blackfin.h>
#include <asm/cacheflush.h>
#include <asm/cplb.h>
#include <asm/cplbinit.h>
#include <asm/mmu_context.h>

/*
 * WARNING
 *
 * This file is compiled with certain -ffixed-reg options.  We have to
 * make sure not to call any functions here that could clobber these
 * registers.
 */

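/*
 * Per-CPU pointer to the page-permission bitmaps of the current process.
 * The array pointed to holds three bitmaps of page_mask_nelts words each:
 * read, write and execute permissions, in that order.
 */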
int page_mask_nelts;
int page_mask_order;
unsigned long *current_rwx_mask[NR_CPUS];

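/* Per-CPU fault and flush statistics.  */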
int nr_dcplb_miss[NR_CPUS], nr_icplb_miss[NR_CPUS];
int nr_icplb_supv_miss[NR_CPUS], nr_dcplb_prot[NR_CPUS];
int nr_cplb_flush[NR_CPUS];

/*
 * Given the contents of the status register, return the index of the
 * CPLB that caused the fault.
 */
static inline int faulting_cplb_index(int status)
{
        int signbits = __builtin_bfin_norm_fr1x32(status & 0xFFFF);
        return 30 - signbits;
}

/*
 * Given the contents of the status register and the DCPLB_DATA contents,
 * return true if a write access should be permitted.
 */
static inline int write_permitted(int status, unsigned long data)
{
        if (status & FAULT_USERSUPV)
                return !!(data & CPLB_SUPV_WR);
        else
                return !!(data & CPLB_USER_WR);
}

/* Counters to implement round-robin replacement.  */
static int icplb_rr_index[NR_CPUS], dcplb_rr_index[NR_CPUS];

/*
 * Find an ICPLB entry to be evicted and return its index.
 */
static int evict_one_icplb(unsigned int cpu)
{
        int i;
        for (i = first_switched_icplb; i < MAX_CPLBS; i++)
                if ((icplb_tbl[cpu][i].data & CPLB_VALID) == 0)
                        return i;
        i = first_switched_icplb + icplb_rr_index[cpu];
        if (i >= MAX_CPLBS) {
                i -= MAX_CPLBS - first_switched_icplb;
                icplb_rr_index[cpu] -= MAX_CPLBS - first_switched_icplb;
        }
        icplb_rr_index[cpu]++;
        return i;
}

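/*
 * Find a DCPLB entry to be evicted and return its index.
 */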
static int evict_one_dcplb(unsigned int cpu)
{
        int i;
        for (i = first_switched_dcplb; i < MAX_CPLBS; i++)
                if ((dcplb_tbl[cpu][i].data & CPLB_VALID) == 0)
                        return i;
        i = first_switched_dcplb + dcplb_rr_index[cpu];
        if (i >= MAX_CPLBS) {
                i -= MAX_CPLBS - first_switched_dcplb;
                dcplb_rr_index[cpu] -= MAX_CPLBS - first_switched_dcplb;
        }
        dcplb_rr_index[cpu]++;
        return i;
}

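/*
 * Handle a DCPLB miss: choose address, page size and protection bits for
 * the faulting address, evict an entry and install the new descriptor.
 * Returns 0 on success, or CPLB_PROT_VIOL if the access is not allowed.
 */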
static noinline int dcplb_miss(unsigned int cpu)
{
        unsigned long addr = bfin_read_DCPLB_FAULT_ADDR();
        int status = bfin_read_DCPLB_STATUS();
        unsigned long *mask;
        int idx;
        unsigned long d_data;

        nr_dcplb_miss[cpu]++;

        d_data = CPLB_SUPV_WR | CPLB_VALID | CPLB_DIRTY | PAGE_SIZE_4KB;
#ifdef CONFIG_BFIN_EXTMEM_DCACHEABLE
        if (bfin_addr_dcacheable(addr)) {
                d_data |= CPLB_L1_CHBL | ANOMALY_05000158_WORKAROUND;
# ifdef CONFIG_BFIN_EXTMEM_WRITETHROUGH
                d_data |= CPLB_L1_AOW | CPLB_WT;
# endif
        }
#endif

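        /*
         * Decide which region the fault hit: L2 SRAM, the async banks or
         * boot ROM above physical_mem_end, the reserved area between
         * _ramend and physical_mem_end, or normal SDRAM covered by the
         * current process' R/W bitmaps.
         */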
        if (L2_LENGTH && addr >= L2_START && addr < L2_START + L2_LENGTH) {
                addr = L2_START;
                d_data = L2_DMEMORY;
        } else if (addr >= physical_mem_end) {
                if (addr >= ASYNC_BANK0_BASE && addr < ASYNC_BANK3_BASE + ASYNC_BANK3_SIZE
                    && (status & FAULT_USERSUPV)) {
                        addr &= ~0x3fffff;
                        d_data &= ~PAGE_SIZE_4KB;
                        d_data |= PAGE_SIZE_4MB;
                } else if (addr >= BOOT_ROM_START && addr < BOOT_ROM_START + BOOT_ROM_LENGTH
                    && (status & (FAULT_RW | FAULT_USERSUPV)) == FAULT_USERSUPV) {
                        addr &= ~(1 * 1024 * 1024 - 1);
                        d_data &= ~PAGE_SIZE_4KB;
                        d_data |= PAGE_SIZE_1MB;
                } else
                        return CPLB_PROT_VIOL;
        } else if (addr >= _ramend) {
                d_data |= CPLB_USER_RD | CPLB_USER_WR;
        } else {
                mask = current_rwx_mask[cpu];
                if (mask) {
                        int page = addr >> PAGE_SHIFT;
                        int idx = page >> 5;
                        int bit = 1 << (page & 31);

                        if (mask[idx] & bit)
                                d_data |= CPLB_USER_RD;

                        mask += page_mask_nelts;
                        if (mask[idx] & bit)
                                d_data |= CPLB_USER_WR;
                }
        }
        idx = evict_one_dcplb(cpu);

        addr &= PAGE_MASK;
        dcplb_tbl[cpu][idx].addr = addr;
        dcplb_tbl[cpu][idx].data = d_data;

        _disable_dcplb();
        bfin_write32(DCPLB_DATA0 + idx * 4, d_data);
        bfin_write32(DCPLB_ADDR0 + idx * 4, addr);
        _enable_dcplb();

        return 0;
}

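/*
 * Handle an ICPLB miss: check that the faulting address may be executed,
 * choose address, page size and protection bits, evict an entry and
 * install the new descriptor.  Returns 0 on success, or CPLB_PROT_VIOL.
 */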
static noinline int icplb_miss(unsigned int cpu)
{
        unsigned long addr = bfin_read_ICPLB_FAULT_ADDR();
        int status = bfin_read_ICPLB_STATUS();
        int idx;
        unsigned long i_data;

        nr_icplb_miss[cpu]++;

        /* If inside the uncached DMA region, fault.  */
        if (addr >= _ramend - DMA_UNCACHED_REGION && addr < _ramend)
                return CPLB_PROT_VIOL;

        if (status & FAULT_USERSUPV)
                nr_icplb_supv_miss[cpu]++;

        /*
         * First, try to find a CPLB that matches this address.  If we
         * find one, then the fact that we're in the miss handler means
         * that the instruction crosses a page boundary.
         */
        for (idx = first_switched_icplb; idx < MAX_CPLBS; idx++) {
                if (icplb_tbl[cpu][idx].data & CPLB_VALID) {
                        unsigned long this_addr = icplb_tbl[cpu][idx].addr;
                        if (this_addr <= addr && this_addr + PAGE_SIZE > addr) {
                                addr += PAGE_SIZE;
                                break;
                        }
                }
        }

        i_data = CPLB_VALID | CPLB_PORTPRIO | PAGE_SIZE_4KB;

#ifdef CONFIG_BFIN_EXTMEM_ICACHEABLE
        /*
         * Normal RAM, and possibly the reserved memory area, are
         * cacheable.
         */
        if (addr < _ramend ||
            (addr < physical_mem_end && reserved_mem_icache_on))
                i_data |= CPLB_L1_CHBL | ANOMALY_05000158_WORKAROUND;
#endif

        if (L2_LENGTH && addr >= L2_START && addr < L2_START + L2_LENGTH) {
                addr = L2_START;
                i_data = L2_IMEMORY;
        } else if (addr >= physical_mem_end) {
                if (addr >= BOOT_ROM_START && addr < BOOT_ROM_START + BOOT_ROM_LENGTH
                    && (status & FAULT_USERSUPV)) {
                        addr &= ~(1 * 1024 * 1024 - 1);
                        i_data &= ~PAGE_SIZE_4KB;
                        i_data |= PAGE_SIZE_1MB;
                } else
                        return CPLB_PROT_VIOL;
        } else if (addr >= _ramend) {
                i_data |= CPLB_USER_RD;
        } else {
                /*
                 * Two cases to distinguish - a supervisor access must
                 * necessarily be for a module page; we grant it
                 * unconditionally (could do better here in the future).
                 * Otherwise, check the x bitmap of the current process.
                 */
                if (!(status & FAULT_USERSUPV)) {
                        unsigned long *mask = current_rwx_mask[cpu];

                        if (mask) {
                                int page = addr >> PAGE_SHIFT;
                                int idx = page >> 5;
                                int bit = 1 << (page & 31);

                                mask += 2 * page_mask_nelts;
                                if (mask[idx] & bit)
                                        i_data |= CPLB_USER_RD;
                        }
                }
        }
        idx = evict_one_icplb(cpu);
        addr &= PAGE_MASK;
        icplb_tbl[cpu][idx].addr = addr;
        icplb_tbl[cpu][idx].data = i_data;

        _disable_icplb();
        bfin_write32(ICPLB_DATA0 + idx * 4, i_data);
        bfin_write32(ICPLB_ADDR0 + idx * 4, addr);
        _enable_icplb();

        return 0;
}

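/*
 * Handle a DCPLB protection violation.  A write to a clean, writable,
 * write-back page only needs the CPLB_DIRTY bit set; anything else is a
 * genuine protection violation.
 */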
static noinline int dcplb_protection_fault(unsigned int cpu)
{
        int status = bfin_read_DCPLB_STATUS();

        nr_dcplb_prot[cpu]++;

        if (status & FAULT_RW) {
                int idx = faulting_cplb_index(status);
                unsigned long data = dcplb_tbl[cpu][idx].data;
                if (!(data & CPLB_WT) && !(data & CPLB_DIRTY) &&
                    write_permitted(status, data)) {
                        data |= CPLB_DIRTY;
                        dcplb_tbl[cpu][idx].data = data;
                        bfin_write32(DCPLB_DATA0 + idx * 4, data);
                        return 0;
                }
        }
        return CPLB_PROT_VIOL;
}

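/*
 * CPLB exception dispatcher, called from exception handling with the
 * SEQSTAT value.  Returns 0 if the fault was handled, nonzero otherwise.
 */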
int cplb_hdr(int seqstat, struct pt_regs *regs)
{
        int cause = seqstat & 0x3f;
        unsigned int cpu = raw_smp_processor_id();
        switch (cause) {
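        /*
         * SEQSTAT excause values: 0x23 = DCPLB protection violation,
         * 0x26 = DCPLB miss, 0x2C = ICPLB miss.
         */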
        case 0x23:
                return dcplb_protection_fault(cpu);
        case 0x2C:
                return icplb_miss(cpu);
        case 0x26:
                return dcplb_miss(cpu);
        default:
                return 1;
        }
}

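/*
 * Invalidate all switched (i.e. not locked) CPLB entries on this CPU;
 * they will be repopulated by the miss handlers as needed.
 */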
void flush_switched_cplbs(unsigned int cpu)
{
        int i;
        unsigned long flags;

        nr_cplb_flush[cpu]++;

        local_irq_save_hw(flags);
        _disable_icplb();
        for (i = first_switched_icplb; i < MAX_CPLBS; i++) {
                icplb_tbl[cpu][i].data = 0;
                bfin_write32(ICPLB_DATA0 + i * 4, 0);
        }
        _enable_icplb();

        _disable_dcplb();
        for (i = first_switched_dcplb; i < MAX_CPLBS; i++) {
                dcplb_tbl[cpu][i].data = 0;
                bfin_write32(DCPLB_DATA0 + i * 4, 0);
        }
        _enable_dcplb();
        local_irq_restore_hw(flags);
}

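/*
 * Record the current process' R/W/X bitmaps and cover the bitmap pages
 * with the dedicated mask DCPLB slots, so the miss handlers can access
 * them without taking further misses.
 */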
void set_mask_dcplbs(unsigned long *masks, unsigned int cpu)
{
        int i;
        unsigned long addr = (unsigned long)masks;
        unsigned long d_data;
        unsigned long flags;

        if (!masks) {
                current_rwx_mask[cpu] = masks;
                return;
        }

        local_irq_save_hw(flags);
        current_rwx_mask[cpu] = masks;

        if (L2_LENGTH && addr >= L2_START && addr < L2_START + L2_LENGTH) {
                addr = L2_START;
                d_data = L2_DMEMORY;
        } else {
                d_data = CPLB_SUPV_WR | CPLB_VALID | CPLB_DIRTY | PAGE_SIZE_4KB;
#ifdef CONFIG_BFIN_EXTMEM_DCACHEABLE
                d_data |= CPLB_L1_CHBL;
# ifdef CONFIG_BFIN_EXTMEM_WRITETHROUGH
                d_data |= CPLB_L1_AOW | CPLB_WT;
# endif
#endif
        }

        _disable_dcplb();
        for (i = first_mask_dcplb; i < first_switched_dcplb; i++) {
                dcplb_tbl[cpu][i].addr = addr;
                dcplb_tbl[cpu][i].data = d_data;
                bfin_write32(DCPLB_DATA0 + i * 4, d_data);
                bfin_write32(DCPLB_ADDR0 + i * 4, addr);
                addr += PAGE_SIZE;
        }
        _enable_dcplb();
        local_irq_restore_hw(flags);
}