/******************************************************************************
 * arch/ia64/include/asm/xen/inst.h
 *
 * Copyright (c) 2008 Isaku Yamahata <yamahata at valinux co jp>
 *                    VA Linux Systems Japan K.K.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 *
 */

#include <asm/xen/privop.h>

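/*
 * Xen paravirtualized replacements for the privileged-instruction macros
 * used by the ia64 low-level entry code (ivt.S, entry.S, etc.).  Instead of
 * executing privileged instructions directly, these macros read/write the
 * XSI_* shared mapping area or issue XEN_HYPER_* hypercalls from
 * <asm/xen/privop.h>.  They are presumably pulled in through the kernel's
 * paravirt_inst.h switch, which selects this header instead of
 * <asm/native/inst.h> when building the Xen-paravirtualized image, so the
 * same assembly source serves both native and Xen.  The defines just below
 * likewise redirect the generic entry points to their xen_* implementations.
 */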
#define ia64_ivt                                xen_ivt
#define DO_SAVE_MIN                             XEN_DO_SAVE_MIN

#define __paravirt_switch_to                    xen_switch_to
#define __paravirt_leave_syscall                xen_leave_syscall
#define __paravirt_work_processed_syscall       xen_work_processed_syscall
#define __paravirt_leave_kernel                 xen_leave_kernel
#define __paravirt_pending_syscall_end          xen_work_pending_syscall_end
#define __paravirt_work_processed_syscall_target \
                                                xen_work_processed_syscall

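/*
 * MOV_FROM_xxx: read a virtualized control register.  Under Xen the
 * relevant CR values are mirrored in the XSI_* shared area, so a plain ld8
 * from the corresponding XSI_ address replaces the privileged
 * "mov reg = cr.xxx" that the native variant of these macros emits
 * (in <asm/native/inst.h>, MOV_FROM_IFA(reg) is just "mov reg = cr.ifa").
 * No hypercall and no scratch register are needed for these reads.
 */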
#define MOV_FROM_IFA(reg)       \
        movl reg = XSI_IFA;     \
        ;;                      \
        ld8 reg = [reg]

#define MOV_FROM_ITIR(reg)      \
        movl reg = XSI_ITIR;    \
        ;;                      \
        ld8 reg = [reg]

#define MOV_FROM_ISR(reg)       \
        movl reg = XSI_ISR;     \
        ;;                      \
        ld8 reg = [reg]

#define MOV_FROM_IHA(reg)       \
        movl reg = XSI_IHA;     \
        ;;                      \
        ld8 reg = [reg]

#define MOV_FROM_IPSR(pred, reg)        \
(pred)  movl reg = XSI_IPSR;            \
        ;;                              \
(pred)  ld8 reg = [reg]

#define MOV_FROM_IIM(reg)       \
        movl reg = XSI_IIM;     \
        ;;                      \
        ld8 reg = [reg]

#define MOV_FROM_IIP(reg)       \
        movl reg = XSI_IIP;     \
        ;;                      \
        ld8 reg = [reg]

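/*
 * cr.ivr and psr cannot simply be read from the shared area; they require
 * the XEN_HYPER_GET_IVR / XEN_HYPER_GET_PSR hypercalls, which return their
 * result in r8.  The .ifc/.ifnc assembler conditionals below special-case
 * the situations where the destination or the clobber operand is already
 * r8, and otherwise preserve the caller's r8 in the clobber register
 * around the hypercall.
 */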
.macro __MOV_FROM_IVR reg, clob
        .ifc "\reg", "r8"
                XEN_HYPER_GET_IVR
                .exitm
        .endif
        .ifc "\clob", "r8"
                XEN_HYPER_GET_IVR
                ;;
                mov \reg = r8
                .exitm
        .endif

        mov \clob = r8
        ;;
        XEN_HYPER_GET_IVR
        ;;
        mov \reg = r8
        ;;
        mov r8 = \clob
.endm
#define MOV_FROM_IVR(reg, clob) __MOV_FROM_IVR reg, clob

.macro __MOV_FROM_PSR pred, reg, clob
        .ifc "\reg", "r8"
                (\pred) XEN_HYPER_GET_PSR;
                .exitm
        .endif
        .ifc "\clob", "r8"
                (\pred) XEN_HYPER_GET_PSR
                ;;
                (\pred) mov \reg = r8
                .exitm
        .endif

        (\pred) mov \clob = r8
        (\pred) XEN_HYPER_GET_PSR
        ;;
        (\pred) mov \reg = r8
        (\pred) mov r8 = \clob
.endm
#define MOV_FROM_PSR(pred, reg, clob)   __MOV_FROM_PSR pred, reg, clob


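/*
 * MOV_TO_xxx: write a virtualized control register.  Symmetric to the
 * reads above: the value is stored into the XSI_* shadow slot with st8,
 * using one scratch register for the address, instead of the privileged
 * "mov cr.xxx = reg" of the native macros.
 */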
#define MOV_TO_IFA(reg, clob)   \
        movl clob = XSI_IFA;    \
        ;;                      \
        st8 [clob] = reg        \

#define MOV_TO_ITIR(pred, reg, clob)    \
(pred)  movl clob = XSI_ITIR;           \
        ;;                              \
(pred)  st8 [clob] = reg

#define MOV_TO_IHA(pred, reg, clob)     \
(pred)  movl clob = XSI_IHA;            \
        ;;                              \
(pred)  st8 [clob] = reg

#define MOV_TO_IPSR(pred, reg, clob)    \
(pred)  movl clob = XSI_IPSR;           \
        ;;                              \
(pred)  st8 [clob] = reg;               \
        ;;

#define MOV_TO_IFS(pred, reg, clob)     \
(pred)  movl clob = XSI_IFS;            \
        ;;                              \
(pred)  st8 [clob] = reg;               \
        ;;

#define MOV_TO_IIP(reg, clob)   \
        movl clob = XSI_IIP;    \
        ;;                      \
        st8 [clob] = reg

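/*
 * MOV_TO_KR: set a kernel register (ar.k0-ar.k7) via the XEN_HYPER_SET_KR
 * hypercall, which is passed the kr index in r8 and the new value in r9.
 * The two macro levels shuffle the operands so that the caller's r8/r9 are
 * preserved even when the clobber arguments alias them.
 */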
.macro ____MOV_TO_KR kr, reg, clob0, clob1
        .ifc "\clob0", "r9"
                .error "clob0 \clob0 must not be r9"
        .endif
        .ifc "\clob1", "r8"
                .error "clob1 \clob1 must not be r8"
        .endif

        .ifnc "\reg", "r9"
                .ifnc "\clob1", "r9"
                        mov \clob1 = r9
                .endif
                mov r9 = \reg
        .endif
        .ifnc "\clob0", "r8"
                mov \clob0 = r8
        .endif
        mov r8 = \kr
        ;;
        XEN_HYPER_SET_KR

        .ifnc "\reg", "r9"
                .ifnc "\clob1", "r9"
                        mov r9 = \clob1
                .endif
        .endif
        .ifnc "\clob0", "r8"
                mov r8 = \clob0
        .endif
.endm

.macro __MOV_TO_KR kr, reg, clob0, clob1
        .ifc "\clob0", "r9"
                ____MOV_TO_KR \kr, \reg, \clob1, \clob0
                .exitm
        .endif
        .ifc "\clob1", "r8"
                ____MOV_TO_KR \kr, \reg, \clob1, \clob0
                .exitm
        .endif

        ____MOV_TO_KR \kr, \reg, \clob0, \clob1
.endm

#define MOV_TO_KR(kr, reg, clob0, clob1) \
        __MOV_TO_KR IA64_KR_ ## kr, reg, clob0, clob1


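/*
 * ITC_I / ITC_D / ITC_I_AND_D: insert a translation into the instruction
 * and/or data TLB.  The privileged itc.i / itc.d instructions are replaced
 * by the XEN_HYPER_ITC_I / XEN_HYPER_ITC_D hypercalls, which take the pte
 * value in r8; as in the other hypercall wrappers, r8 is saved and
 * restored in the clobber register unless it already holds the operand.
 */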
.macro __ITC_I pred, reg, clob
        .ifc "\reg", "r8"
                (\pred) XEN_HYPER_ITC_I
                .exitm
        .endif
        .ifc "\clob", "r8"
                (\pred) mov r8 = \reg
                ;;
                (\pred) XEN_HYPER_ITC_I
                .exitm
        .endif

        (\pred) mov \clob = r8
        (\pred) mov r8 = \reg
        ;;
        (\pred) XEN_HYPER_ITC_I
        ;;
        (\pred) mov r8 = \clob
        ;;
.endm
#define ITC_I(pred, reg, clob)  __ITC_I pred, reg, clob

.macro __ITC_D pred, reg, clob
        .ifc "\reg", "r8"
                (\pred) XEN_HYPER_ITC_D
                ;;
                .exitm
        .endif
        .ifc "\clob", "r8"
                (\pred) mov r8 = \reg
                ;;
                (\pred) XEN_HYPER_ITC_D
                ;;
                .exitm
        .endif

        (\pred) mov \clob = r8
        (\pred) mov r8 = \reg
        ;;
        (\pred) XEN_HYPER_ITC_D
        ;;
        (\pred) mov r8 = \clob
        ;;
.endm
#define ITC_D(pred, reg, clob)  __ITC_D pred, reg, clob

.macro __ITC_I_AND_D pred_i, pred_d, reg, clob
        .ifc "\reg", "r8"
                (\pred_i)XEN_HYPER_ITC_I
                ;;
                (\pred_d)XEN_HYPER_ITC_D
                ;;
                .exitm
        .endif
        .ifc "\clob", "r8"
                mov r8 = \reg
                ;;
                (\pred_i)XEN_HYPER_ITC_I
                ;;
                (\pred_d)XEN_HYPER_ITC_D
                ;;
                .exitm
        .endif

        mov \clob = r8
        mov r8 = \reg
        ;;
        (\pred_i)XEN_HYPER_ITC_I
        ;;
        (\pred_d)XEN_HYPER_ITC_D
        ;;
        mov r8 = \clob
        ;;
.endm
#define ITC_I_AND_D(pred_i, pred_d, reg, clob) \
        __ITC_I_AND_D pred_i, pred_d, reg, clob

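/*
 * THASH: compute the VHPT hash address for a virtual address.  The thash
 * instruction is replaced by XEN_HYPER_THASH, which takes the address in
 * r8 and returns the result in r8.
 */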
.macro __THASH pred, reg0, reg1, clob
        .ifc "\reg0", "r8"
                (\pred) mov r8 = \reg1
                (\pred) XEN_HYPER_THASH
                .exitm
        .endif
        .ifc "\reg1", "r8"
                (\pred) XEN_HYPER_THASH
                ;;
                (\pred) mov \reg0 = r8
                ;;
                .exitm
        .endif
        .ifc "\clob", "r8"
                (\pred) mov r8 = \reg1
                (\pred) XEN_HYPER_THASH
                ;;
                (\pred) mov \reg0 = r8
                ;;
                .exitm
        .endif

        (\pred) mov \clob = r8
        (\pred) mov r8 = \reg1
        (\pred) XEN_HYPER_THASH
        ;;
        (\pred) mov \reg0 = r8
        (\pred) mov r8 = \clob
        ;;
.endm
#define THASH(pred, reg0, reg1, clob) __THASH pred, reg0, reg1, clob

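/*
 * Interrupt-state virtualization: psr.ic is shadowed by the 4-byte flag at
 * XSI_PSR_IC, and psr.i is virtualized through the byte pointed to by
 * XSI_PSR_I_ADDR (the vcpu's evtchn_upcall_mask), so "ssm/rsm psr.ic" and
 * "ssm/rsm psr.i" become plain stores.  Only when an event is already
 * pending does SSM_PSR_I fall back to the XEN_HYPER_SSM_I hypercall.
 */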
#define SSM_PSR_IC_AND_DEFAULT_BITS_AND_SRLZ_I(clob0, clob1)    \
        mov clob0 = 1;                                          \
        movl clob1 = XSI_PSR_IC;                                \
        ;;                                                      \
        st4 [clob1] = clob0                                     \
        ;;

#define SSM_PSR_IC_AND_SRLZ_D(clob0, clob1)     \
        ;;                                      \
        srlz.d;                                 \
        mov clob1 = 1;                          \
        movl clob0 = XSI_PSR_IC;                \
        ;;                                      \
        st4 [clob0] = clob1

#define RSM_PSR_IC(clob)        \
        movl clob = XSI_PSR_IC; \
        ;;                      \
        st4 [clob] = r0;        \
        ;;

/* pred will be clobbered */
#define MASK_TO_PEND_OFS    (-1)
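/*
 * In the Xen vcpu_info layout, evtchn_upcall_pending sits one byte before
 * evtchn_upcall_mask, so storing through the mask pointer with a -1 update
 * (MASK_TO_PEND_OFS) leaves the pointer on the pending byte for the
 * subsequent ld1 in SSM_PSR_I below.
 */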
#define SSM_PSR_I(pred, pred_clob, clob)                                \
(pred)  movl clob = XSI_PSR_I_ADDR                                      \
        ;;                                                              \
(pred)  ld8 clob = [clob]                                               \
        ;;                                                              \
        /* if (pred) vpsr.i = 1 */                                      \
        /* if (pred) (vcpu->vcpu_info->evtchn_upcall_mask)=0 */         \
(pred)  st1 [clob] = r0, MASK_TO_PEND_OFS                               \
        ;;                                                              \
        /* if (vcpu->vcpu_info->evtchn_upcall_pending) */               \
(pred)  ld1 clob = [clob]                                               \
        ;;                                                              \
(pred)  cmp.ne.unc pred_clob, p0 = clob, r0                             \
        ;;                                                              \
(pred_clob)XEN_HYPER_SSM_I      /* do a real ssm psr.i */

#define RSM_PSR_I(pred, clob0, clob1)   \
        movl clob0 = XSI_PSR_I_ADDR;    \
        mov clob1 = 1;                  \
        ;;                              \
        ld8 clob0 = [clob0];            \
        ;;                              \
(pred)  st1 [clob0] = clob1

#define RSM_PSR_I_IC(clob0, clob1, clob2)               \
        movl clob0 = XSI_PSR_I_ADDR;                    \
        movl clob1 = XSI_PSR_IC;                        \
        ;;                                              \
        ld8 clob0 = [clob0];                            \
        mov clob2 = 1;                                  \
        ;;                                              \
        /* note: clears both vpsr.i and vpsr.ic! */     \
        st1 [clob0] = clob2;                            \
        st4 [clob1] = r0;                               \
        ;;

#define RSM_PSR_DT              \
        XEN_HYPER_RSM_PSR_DT

#define SSM_PSR_DT_AND_SRLZ_I   \
        XEN_HYPER_SSM_PSR_DT

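/*
 * BSW_0 / BSW_1: switch between register banks 0 and 1 without the
 * privileged bsw instruction.  BSW_0 spills the bank 1 registers r16-r31
 * (and their NaT bits, via ar.unat and XSI_B1NAT) into the XSI_BANK1_R16
 * save area and then writes 0 to XSI_BANKNUM; BSW_1 restores them and
 * writes 1.
 */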
#define BSW_0(clob0, clob1, clob2)                      \
        ;;                                              \
        /* r16-r31 all now hold bank1 values */         \
        mov clob2 = ar.unat;                            \
        movl clob0 = XSI_BANK1_R16;                     \
        movl clob1 = XSI_BANK1_R16 + 8;                 \
        ;;                                              \
.mem.offset 0, 0; st8.spill [clob0] = r16, 16;          \
.mem.offset 8, 0; st8.spill [clob1] = r17, 16;          \
        ;;                                              \
.mem.offset 0, 0; st8.spill [clob0] = r18, 16;          \
.mem.offset 8, 0; st8.spill [clob1] = r19, 16;          \
        ;;                                              \
.mem.offset 0, 0; st8.spill [clob0] = r20, 16;          \
.mem.offset 8, 0; st8.spill [clob1] = r21, 16;          \
        ;;                                              \
.mem.offset 0, 0; st8.spill [clob0] = r22, 16;          \
.mem.offset 8, 0; st8.spill [clob1] = r23, 16;          \
        ;;                                              \
.mem.offset 0, 0; st8.spill [clob0] = r24, 16;          \
.mem.offset 8, 0; st8.spill [clob1] = r25, 16;          \
        ;;                                              \
.mem.offset 0, 0; st8.spill [clob0] = r26, 16;          \
.mem.offset 8, 0; st8.spill [clob1] = r27, 16;          \
        ;;                                              \
.mem.offset 0, 0; st8.spill [clob0] = r28, 16;          \
.mem.offset 8, 0; st8.spill [clob1] = r29, 16;          \
        ;;                                              \
.mem.offset 0, 0; st8.spill [clob0] = r30, 16;          \
.mem.offset 8, 0; st8.spill [clob1] = r31, 16;          \
        ;;                                              \
        mov clob1 = ar.unat;                            \
        movl clob0 = XSI_B1NAT;                         \
        ;;                                              \
        st8 [clob0] = clob1;                            \
        mov ar.unat = clob2;                            \
        movl clob0 = XSI_BANKNUM;                       \
        ;;                                              \
        st4 [clob0] = r0


        /* FIXME: THIS CODE IS NOT NaT SAFE! */
#define XEN_BSW_1(clob)                 \
        mov clob = ar.unat;             \
        movl r30 = XSI_B1NAT;           \
        ;;                              \
        ld8 r30 = [r30];                \
        mov r31 = 1;                    \
        ;;                              \
        mov ar.unat = r30;              \
        movl r30 = XSI_BANKNUM;         \
        ;;                              \
        st4 [r30] = r31;                \
        movl r30 = XSI_BANK1_R16;       \
        movl r31 = XSI_BANK1_R16+8;     \
        ;;                              \
        ld8.fill r16 = [r30], 16;       \
        ld8.fill r17 = [r31], 16;       \
        ;;                              \
        ld8.fill r18 = [r30], 16;       \
        ld8.fill r19 = [r31], 16;       \
        ;;                              \
        ld8.fill r20 = [r30], 16;       \
        ld8.fill r21 = [r31], 16;       \
        ;;                              \
        ld8.fill r22 = [r30], 16;       \
        ld8.fill r23 = [r31], 16;       \
        ;;                              \
        ld8.fill r24 = [r30], 16;       \
        ld8.fill r25 = [r31], 16;       \
        ;;                              \
        ld8.fill r26 = [r30], 16;       \
        ld8.fill r27 = [r31], 16;       \
        ;;                              \
        ld8.fill r28 = [r30], 16;       \
        ld8.fill r29 = [r31], 16;       \
        ;;                              \
        ld8.fill r30 = [r30];           \
        ld8.fill r31 = [r31];           \
        ;;                              \
        mov ar.unat = clob

#define BSW_1(clob0, clob1)     XEN_BSW_1(clob1)


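/*
 * cover and rfi go through the XEN_HYPER_COVER and XEN_HYPER_RFI
 * hypercalls; RFI additionally emits dv_serialize_data after the
 * hypercall.
 */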
#define COVER   \
        XEN_HYPER_COVER

#define RFI                     \
        XEN_HYPER_RFI;          \
        dv_serialize_data