/*
 * File:         arch/blackfin/kernel/process.c
 * Based on:
 * Author:
 *
 * Created:
 * Description:  Blackfin architecture-dependent process handling.
 *
 * Modified:
 *               Copyright 2004-2006 Analog Devices Inc.
 *
 * Bugs:         Enter bugs at http://blackfin.uclinux.org/
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, see the file COPYING, or write
 * to the Free Software Foundation, Inc.,
 * 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
 */

#include <linux/module.h>
#include <linux/smp_lock.h>
#include <linux/unistd.h>
#include <linux/user.h>
#include <linux/uaccess.h>
#include <linux/sched.h>
#include <linux/tick.h>
#include <linux/fs.h>
#include <linux/err.h>

#include <asm/blackfin.h>
#include <asm/fixed_code.h>
#include <asm/mem_map.h>

asmlinkage void ret_from_fork(void);

/* Points to the SDRAM backup memory for the stack that is currently in
 * L1 scratchpad memory.
 */
void *current_l1_stack_save;

/* The number of tasks currently using an L1 stack area.  The SRAM is
 * allocated/deallocated whenever this changes from/to zero.
 */
int nr_l1stack_tasks;

/* Start and length of the area in L1 scratchpad memory which we've allocated
 * for process stacks.
 */
void *l1_stack_base;
unsigned long l1_stack_len;

/*
 * Power management idle function, if any.
 */
void (*pm_idle)(void) = NULL;
EXPORT_SYMBOL(pm_idle);

void (*pm_power_off)(void) = NULL;
EXPORT_SYMBOL(pm_power_off);
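
/*
 * Example (hypothetical, not part of this file): a board or PMIC driver
 * would typically install its own hook by assigning to pm_power_off:
 *
 *	static void my_board_power_off(void)
 *	{
 *		// drive the GPIO wired to the power regulator
 *	}
 *
 *	pm_power_off = my_board_power_off;
 *
 * The architecture's machine_power_off() is then expected to call
 * through this pointer on shutdown.
 */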

/*
 * The idle loop on BFIN
 */
#ifdef CONFIG_IDLE_L1
static void default_idle(void) __attribute__((l1_text));
void cpu_idle(void) __attribute__((l1_text));
#endif

/*
 * This is our default idle handler.  We need to disable
 * interrupts here to ensure we don't miss a wakeup call.
 */
static void default_idle(void)
{
#ifdef CONFIG_IPIPE
        ipipe_suspend_domain();
#endif
        local_irq_disable_hw();
        if (!need_resched())
                idle_with_irq_disabled();

        local_irq_enable_hw();
}

/*
 * The idle thread.  We try to conserve power while keeping overall
 * latency low: stop the periodic tick while idle and call the
 * platform's pm_idle hook if one is installed, falling back to
 * default_idle otherwise.
 */
void cpu_idle(void)
{
        /* endless idle loop with no priority at all */
        while (1) {
                void (*idle)(void) = pm_idle;

#ifdef CONFIG_HOTPLUG_CPU
                if (cpu_is_offline(smp_processor_id()))
                        cpu_die();
#endif
                if (!idle)
                        idle = default_idle;
                tick_nohz_stop_sched_tick(1);
                while (!need_resched())
                        idle();
                tick_nohz_restart_sched_tick();
                preempt_enable_no_resched();
                schedule();
                preempt_disable();
        }
}

/* Fill in the FPU structure for a core dump.  Blackfin has no hardware
 * FPU, so there is nothing to fill in.
 */
int dump_fpu(struct pt_regs *regs, elf_fpregset_t *fpregs)
{
        return 1;
}

/*
 * This gets run with P1 containing the
 * function to call, and R1 containing
 * the "args".  Note P0 is clobbered on the way here.
 */
void kernel_thread_helper(void);
__asm__(".section .text\n"
        ".align 4\n"
        "_kernel_thread_helper:\n"
        "\tsp += -12;\n"
        "\tr0 = r1;\n"
        "\tcall (p1);\n"
        "\tcall _do_exit;\n"
        ".previous");

/*
 * Create a kernel thread.
 */
pid_t kernel_thread(int (*fn) (void *), void *arg, unsigned long flags)
{
        struct pt_regs regs;

        memset(&regs, 0, sizeof(regs));

        regs.r1 = (unsigned long)arg;
        regs.p1 = (unsigned long)fn;
        regs.pc = (unsigned long)kernel_thread_helper;
        regs.orig_p0 = -1;
        /* Set the 0x2 bit in IPEND to tell ret_from_fork we should be
         * returning to kernel mode.
         */
        regs.ipend = 0x8002;
        __asm__ __volatile__("%0 = syscfg;" : "=da"(regs.syscfg) : );
        return do_fork(flags | CLONE_VM | CLONE_UNTRACED, 0, &regs, 0, NULL,
                       NULL);
}
EXPORT_SYMBOL(kernel_thread);
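
/*
 * Example (hypothetical caller, not part of this file): code of this
 * vintage would spawn a kernel thread roughly like so:
 *
 *	static int my_worker(void *arg)
 *	{
 *		// do some background work, then exit
 *		return 0;
 *	}
 *
 *	pid_t pid = kernel_thread(my_worker, NULL, CLONE_FS | CLONE_FILES);
 *	if (pid < 0)
 *		// handle the error
 *
 * The thread shares the kernel address space (CLONE_VM is forced above)
 * and enters through kernel_thread_helper, which calls fn(arg) and then
 * passes the return value to do_exit().
 */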

/*
 * Do necessary setup to start up a newly executed thread.
 *
 * Pass the data segment into user programs if it exists;
 * it can't hurt anything as far as I can tell.
 */
void start_thread(struct pt_regs *regs, unsigned long new_ip, unsigned long new_sp)
{
        set_fs(USER_DS);
        regs->pc = new_ip;
        if (current->mm)
                regs->p5 = current->mm->start_data;
#ifdef CONFIG_SMP
        task_thread_info(current)->l1_task_info.stack_start =
                (void *)current->mm->context.stack_start;
        task_thread_info(current)->l1_task_info.lowest_sp = (void *)new_sp;
        memcpy(L1_SCRATCH_TASK_INFO, &task_thread_info(current)->l1_task_info,
               sizeof(*L1_SCRATCH_TASK_INFO));
#endif
        wrusp(new_sp);
}
EXPORT_SYMBOL_GPL(start_thread);

void flush_thread(void)
{
}

asmlinkage int bfin_vfork(struct pt_regs *regs)
{
        return do_fork(CLONE_VFORK | CLONE_VM | SIGCHLD, rdusp(), regs, 0, NULL,
                       NULL);
}

asmlinkage int bfin_clone(struct pt_regs *regs)
{
        unsigned long clone_flags;
        unsigned long newsp;

#ifdef __ARCH_SYNC_CORE_DCACHE
        if (current->rt.nr_cpus_allowed == num_possible_cpus()) {
                current->cpus_allowed = cpumask_of_cpu(smp_processor_id());
                current->rt.nr_cpus_allowed = 1;
        }
#endif

        /* syscall2 puts clone_flags in r0 and usp in r1 */
        clone_flags = regs->r0;
        newsp = regs->r1;
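        /* A NULL child stack means "use the parent's current usp";
         * otherwise reserve 12 bytes at the top of the new stack,
         * presumably the outgoing-argument area the Blackfin C ABI
         * expects (compare the "sp += -12" in kernel_thread_helper).
         */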
        if (!newsp)
                newsp = rdusp();
        else
                newsp -= 12;
        return do_fork(clone_flags, newsp, regs, 0, NULL, NULL);
}

int
copy_thread(unsigned long clone_flags,
            unsigned long usp, unsigned long topstk,
            struct task_struct *p, struct pt_regs *regs)
{
        struct pt_regs *childregs;

        childregs = (struct pt_regs *) (task_stack_page(p) + THREAD_SIZE) - 1;
        *childregs = *regs;
        childregs->r0 = 0;

        p->thread.usp = usp;
        p->thread.ksp = (unsigned long)childregs;
        p->thread.pc = (unsigned long)ret_from_fork;

        return 0;
}

/*
 * sys_execve() executes a new program.
 */
asmlinkage int sys_execve(char __user *name, char __user * __user *argv, char __user * __user *envp)
{
        int error;
        char *filename;
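        /* The system call stub saved pt_regs on the kernel stack just
         * above the syscall arguments; the 6-word offset from &name is
         * this tree's convention for locating it (an assumption based
         * on the surrounding code, not verified against entry.S).
         */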
        struct pt_regs *regs = (struct pt_regs *)((&name) + 6);

        lock_kernel();
        filename = getname(name);
        error = PTR_ERR(filename);
        if (IS_ERR(filename))
                goto out;
        error = do_execve(filename, argv, envp, regs);
        putname(filename);
 out:
        unlock_kernel();
        return error;
}

unsigned long get_wchan(struct task_struct *p)
{
        unsigned long fp, pc;
        unsigned long stack_page;
        int count = 0;

        if (!p || p == current || p->state == TASK_RUNNING)
                return 0;

        stack_page = (unsigned long)p;
        fp = p->thread.usp;
        do {
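                /* 8184 is presumably THREAD_SIZE (8192) minus the
                 * 8-byte frame record (saved FP and RETS) that sits at
                 * the top of the stack.
                 */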
                if (fp < stack_page + sizeof(struct thread_info) ||
                    fp >= 8184 + stack_page)
                        return 0;
                pc = ((unsigned long *)fp)[1];
                if (!in_sched_functions(pc))
                        return pc;
                fp = *(unsigned long *)fp;
        } while (count++ < 16);

        return 0;
}

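/* Userspace atomic ops on Blackfin are short fixed-address instruction
 * sequences (see fixed_code.h); the core has no atomic ISA, so if an
 * interrupt lands mid-sequence we finish the pending update on the
 * task's behalf here and advance the PC past it.  A sketch of the
 * ATOMIC_XCHG32 sequence, reconstructed from the fixups below rather
 * than from fixed_code.S:
 *
 *	ATOMIC_XCHG32 + 0:	R0 = [P0];
 *	ATOMIC_XCHG32 + 2:	[P0] = R1;	// fixup point below
 *	ATOMIC_XCHG32 + 4:	RTS;
 */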
void finish_atomic_sections(struct pt_regs *regs)
{
        int __user *up0 = (int __user *)regs->p0;

        if (regs->pc < ATOMIC_SEQS_START || regs->pc >= ATOMIC_SEQS_END)
                return;

        switch (regs->pc) {
        case ATOMIC_XCHG32 + 2:
                put_user(regs->r1, up0);
                regs->pc += 2;
                break;

        case ATOMIC_CAS32 + 2:
        case ATOMIC_CAS32 + 4:
                if (regs->r0 == regs->r1)
                        put_user(regs->r2, up0);
                regs->pc = ATOMIC_CAS32 + 8;
                break;
        case ATOMIC_CAS32 + 6:
                put_user(regs->r2, up0);
                regs->pc += 2;
                break;

        case ATOMIC_ADD32 + 2:
                regs->r0 = regs->r1 + regs->r0;
                /* fall through */
        case ATOMIC_ADD32 + 4:
                put_user(regs->r0, up0);
                regs->pc = ATOMIC_ADD32 + 6;
                break;

        case ATOMIC_SUB32 + 2:
                regs->r0 = regs->r1 - regs->r0;
                /* fall through */
        case ATOMIC_SUB32 + 4:
                put_user(regs->r0, up0);
                regs->pc = ATOMIC_SUB32 + 6;
                break;

        case ATOMIC_IOR32 + 2:
                regs->r0 = regs->r1 | regs->r0;
                /* fall through */
        case ATOMIC_IOR32 + 4:
                put_user(regs->r0, up0);
                regs->pc = ATOMIC_IOR32 + 6;
                break;

        case ATOMIC_AND32 + 2:
                regs->r0 = regs->r1 & regs->r0;
                /* fall through */
        case ATOMIC_AND32 + 4:
                put_user(regs->r0, up0);
                regs->pc = ATOMIC_AND32 + 6;
                break;

        case ATOMIC_XOR32 + 2:
                regs->r0 = regs->r1 ^ regs->r0;
                /* fall through */
        case ATOMIC_XOR32 + 4:
                put_user(regs->r0, up0);
                regs->pc = ATOMIC_XOR32 + 6;
                break;
        }
}

static inline
int in_mem(unsigned long addr, unsigned long size,
           unsigned long start, unsigned long end)
{
        return addr >= start && addr + size <= end;
}

static inline
int in_mem_const_off(unsigned long addr, unsigned long size, unsigned long off,
                     unsigned long const_addr, unsigned long const_size)
{
        return const_size &&
               in_mem(addr, size, const_addr + off, const_addr + const_size);
}

static inline
int in_mem_const(unsigned long addr, unsigned long size,
                 unsigned long const_addr, unsigned long const_size)
{
        return in_mem_const_off(addr, size, 0, const_addr, const_size);
}
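
/* Map an async (EBIU) bank to an access type: fault if the bank is not
 * enabled in EBIU_AMGCTL or has ARDY sampling enabled (either way a
 * read could hang the core on the async bus), otherwise allow plain
 * core access.
 */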
#define IN_ASYNC(bnum, bctlnum) \
({ \
        (bfin_read_EBIU_AMGCTL() & 0xe) < ((bnum + 1) << 1) ? -EFAULT : \
        bfin_read_EBIU_AMBCTL##bctlnum() & B##bnum##RDYEN ? -EFAULT : \
        BFIN_MEM_ACCESS_CORE; \
})

int bfin_mem_access_type(unsigned long addr, unsigned long size)
{
        int cpu = raw_smp_processor_id();

        /* Check that things do not wrap around */
        if (addr > ULONG_MAX - size)
                return -EFAULT;

        if (in_mem(addr, size, FIXED_CODE_START, physical_mem_end))
                return BFIN_MEM_ACCESS_CORE;

        if (in_mem_const(addr, size, L1_CODE_START, L1_CODE_LENGTH))
                return cpu == 0 ? BFIN_MEM_ACCESS_ITEST : BFIN_MEM_ACCESS_IDMA;
        if (in_mem_const(addr, size, L1_SCRATCH_START, L1_SCRATCH_LENGTH))
                return cpu == 0 ? BFIN_MEM_ACCESS_CORE_ONLY : -EFAULT;
        if (in_mem_const(addr, size, L1_DATA_A_START, L1_DATA_A_LENGTH))
                return cpu == 0 ? BFIN_MEM_ACCESS_CORE : BFIN_MEM_ACCESS_IDMA;
        if (in_mem_const(addr, size, L1_DATA_B_START, L1_DATA_B_LENGTH))
                return cpu == 0 ? BFIN_MEM_ACCESS_CORE : BFIN_MEM_ACCESS_IDMA;
#ifdef COREB_L1_CODE_START
        if (in_mem_const(addr, size, COREB_L1_CODE_START, COREB_L1_CODE_LENGTH))
                return cpu == 1 ? BFIN_MEM_ACCESS_ITEST : BFIN_MEM_ACCESS_IDMA;
        if (in_mem_const(addr, size, COREB_L1_SCRATCH_START, L1_SCRATCH_LENGTH))
                return cpu == 1 ? BFIN_MEM_ACCESS_CORE_ONLY : -EFAULT;
        if (in_mem_const(addr, size, COREB_L1_DATA_A_START, COREB_L1_DATA_A_LENGTH))
                return cpu == 1 ? BFIN_MEM_ACCESS_CORE : BFIN_MEM_ACCESS_IDMA;
        if (in_mem_const(addr, size, COREB_L1_DATA_B_START, COREB_L1_DATA_B_LENGTH))
                return cpu == 1 ? BFIN_MEM_ACCESS_CORE : BFIN_MEM_ACCESS_IDMA;
#endif
        if (in_mem_const(addr, size, L2_START, L2_LENGTH))
                return BFIN_MEM_ACCESS_CORE;

        if (addr >= SYSMMR_BASE)
                return BFIN_MEM_ACCESS_CORE_ONLY;

        /* We can't read EBIU banks that aren't enabled or we end up hanging
         * on the access to the async space.
         */
        if (in_mem_const(addr, size, ASYNC_BANK0_BASE, ASYNC_BANK0_SIZE))
                return IN_ASYNC(0, 0);
        if (in_mem_const(addr, size, ASYNC_BANK1_BASE, ASYNC_BANK1_SIZE))
                return IN_ASYNC(1, 0);
        if (in_mem_const(addr, size, ASYNC_BANK2_BASE, ASYNC_BANK2_SIZE))
                return IN_ASYNC(2, 1);
        if (in_mem_const(addr, size, ASYNC_BANK3_BASE, ASYNC_BANK3_SIZE))
                return IN_ASYNC(3, 1);

        if (in_mem_const(addr, size, BOOT_ROM_START, BOOT_ROM_LENGTH))
                return BFIN_MEM_ACCESS_CORE;
        if (in_mem_const(addr, size, L1_ROM_START, L1_ROM_LENGTH))
                return BFIN_MEM_ACCESS_DMA;

        return -EFAULT;
}
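
/*
 * Example (hypothetical caller, not part of this file): a debugger or
 * ptrace backend deciding how to read target memory might do
 *
 *	switch (bfin_mem_access_type(addr, len)) {
 *	case BFIN_MEM_ACCESS_CORE:
 *	case BFIN_MEM_ACCESS_CORE_ONLY:
 *		memcpy(dst, (void *)addr, len);
 *		break;
 *	case BFIN_MEM_ACCESS_DMA:
 *	case BFIN_MEM_ACCESS_IDMA:
 *	case BFIN_MEM_ACCESS_ITEST:
 *		// use the DMA/ITEST copy helpers instead of a plain load
 *		break;
 *	default:
 *		return -EFAULT;
 *	}
 */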

#if defined(CONFIG_ACCESS_CHECK)
#ifdef CONFIG_ACCESS_OK_L1
__attribute__((l1_text))
#endif
/* Return 1 if access to memory range is OK, 0 otherwise */
int _access_ok(unsigned long addr, unsigned long size)
{
        if (size == 0)
                return 1;
        /* Check that things do not wrap around */
        if (addr > ULONG_MAX - size)
                return 0;
        if (segment_eq(get_fs(), KERNEL_DS))
                return 1;
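        /* The if (1)/if (0) construct below (rather than #ifdef around
         * the whole block) keeps both branches visible to the compiler
         * so they are always syntax-checked.
         */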
#ifdef CONFIG_MTD_UCLINUX
        if (1)
#else
        if (0)
#endif
        {
                if (in_mem(addr, size, memory_start, memory_end))
                        return 1;
                if (in_mem(addr, size, memory_mtd_end, physical_mem_end))
                        return 1;
# ifndef CONFIG_ROMFS_ON_MTD
                if (0)
# endif
                        /* For XIP, allow user space to use pointers within the ROMFS.  */
                        if (in_mem(addr, size, memory_mtd_start, memory_mtd_end))
                                return 1;
        } else {
                if (in_mem(addr, size, memory_start, physical_mem_end))
                        return 1;
        }

        if (in_mem(addr, size, (unsigned long)__init_begin, (unsigned long)__init_end))
                return 1;

        if (in_mem_const(addr, size, L1_CODE_START, L1_CODE_LENGTH))
                return 1;
        if (in_mem_const_off(addr, size, _etext_l1 - _stext_l1, L1_CODE_START, L1_CODE_LENGTH))
                return 1;
        if (in_mem_const_off(addr, size, _ebss_l1 - _sdata_l1, L1_DATA_A_START, L1_DATA_A_LENGTH))
                return 1;
        if (in_mem_const_off(addr, size, _ebss_b_l1 - _sdata_b_l1, L1_DATA_B_START, L1_DATA_B_LENGTH))
                return 1;
#ifdef COREB_L1_CODE_START
        if (in_mem_const(addr, size, COREB_L1_CODE_START, COREB_L1_CODE_LENGTH))
                return 1;
        if (in_mem_const(addr, size, COREB_L1_SCRATCH_START, L1_SCRATCH_LENGTH))
                return 1;
        if (in_mem_const(addr, size, COREB_L1_DATA_A_START, COREB_L1_DATA_A_LENGTH))
                return 1;
        if (in_mem_const(addr, size, COREB_L1_DATA_B_START, COREB_L1_DATA_B_LENGTH))
                return 1;
#endif
        if (in_mem_const_off(addr, size, _ebss_l2 - _stext_l2, L2_START, L2_LENGTH))
                return 1;

        if (in_mem_const(addr, size, BOOT_ROM_START, BOOT_ROM_LENGTH))
                return 1;
        if (in_mem_const(addr, size, L1_ROM_START, L1_ROM_LENGTH))
                return 1;

        return 0;
}
EXPORT_SYMBOL(_access_ok);
#endif /* CONFIG_ACCESS_CHECK */