/*
 * Machine specific setup for xen
 *
 * Jeremy Fitzhardinge <jeremy@xensource.com>, XenSource Inc, 2007
 */

#include <linux/module.h>
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/pm.h>
#include <linux/memblock.h>
#include <linux/cpuidle.h>

#include <asm/elf.h>
#include <asm/vdso.h>
#include <asm/e820.h>
#include <asm/setup.h>
#include <asm/acpi.h>
#include <asm/xen/hypervisor.h>
#include <asm/xen/hypercall.h>

#include <xen/xen.h>
#include <xen/page.h>
#include <xen/interface/callback.h>
#include <xen/interface/memory.h>
#include <xen/interface/physdev.h>
#include <xen/features.h>

#include "xen-ops.h"
#include "vdso.h"
/* These are code, but not functions.  Defined in entry.S */
extern const char xen_hypervisor_callback[];
extern const char xen_failsafe_callback[];
extern void xen_sysenter_target(void);
extern void xen_syscall_target(void);
extern void xen_syscall32_target(void);
/* Amount of extra memory space we add to the e820 ranges */
struct xen_memory_region xen_extra_mem[XEN_EXTRA_MEM_MAX_REGIONS] __initdata;

/* Number of pages released from the initial allocation. */
unsigned long xen_released_pages;
/*
 * The maximum amount of extra memory compared to the base size.  The
 * main scaling factor is the size of struct page.  At extreme ratios
 * of base:extra, all the base memory can be filled with page
 * structures for the extra memory, leaving no space for anything
 * else.
 *
 * 10x seems like a reasonable balance between scaling flexibility and
 * leaving a practically usable system.
 */
#define EXTRA_MEM_RATIO         (10)
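
/*
 * Record a range that lies above the initial allocation so it can be
 * populated later (by the balloon driver): merge it into
 * xen_extra_mem[], reserve it in memblock so the kernel leaves it
 * alone, and mark its PFNs missing in the P2M until they are backed
 * by real machine pages.
 */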
static void __init xen_add_extra_mem(u64 start, u64 size)
{
        unsigned long pfn;
        int i;

        for (i = 0; i < XEN_EXTRA_MEM_MAX_REGIONS; i++) {
                /* Add new region. */
                if (xen_extra_mem[i].size == 0) {
                        xen_extra_mem[i].start = start;
                        xen_extra_mem[i].size = size;
                        break;
                }
                /* Append to existing region. */
                if (xen_extra_mem[i].start + xen_extra_mem[i].size == start) {
                        xen_extra_mem[i].size += size;
                        break;
                }
        }
        if (i == XEN_EXTRA_MEM_MAX_REGIONS)
                printk(KERN_WARNING "Warning: not enough extra memory regions\n");

        memblock_x86_reserve_range(start, start + size, "XEN EXTRA");

        xen_max_p2m_pfn = PFN_DOWN(start + size);

        for (pfn = PFN_DOWN(start); pfn <= xen_max_p2m_pfn; pfn++)
                __set_phys_to_machine(pfn, INVALID_P2M_ENTRY);
}
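
/*
 * Give the pages backing [start_addr, end_addr) back to the
 * hypervisor, one page at a time, via XENMEM_decrease_reservation,
 * marking each released PFN missing in the P2M.  Returns the number
 * of pages actually freed.
 */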
static unsigned long __init xen_release_chunk(phys_addr_t start_addr,
                                              phys_addr_t end_addr)
{
        struct xen_memory_reservation reservation = {
                .address_bits = 0,
                .extent_order = 0,
                .domid        = DOMID_SELF
        };
        unsigned long start, end;
        unsigned long len = 0;
        unsigned long pfn;
        int ret;

        start = PFN_UP(start_addr);
        end = PFN_DOWN(end_addr);
        if (end <= start)
                return 0;

        for (pfn = start; pfn < end; pfn++) {
                unsigned long mfn = pfn_to_mfn(pfn);

                /* Make sure pfn exists to start with */
                if (mfn == INVALID_P2M_ENTRY || mfn_to_pfn(mfn) != pfn)
                        continue;

                set_xen_guest_handle(reservation.extent_start, &mfn);
                reservation.nr_extents = 1;

                ret = HYPERVISOR_memory_op(XENMEM_decrease_reservation,
                                           &reservation);
                WARN(ret != 1, "Failed to release pfn %lx err=%d\n", pfn, ret);
                if (ret == 1) {
                        __set_phys_to_machine(pfn, INVALID_P2M_ENTRY);
                        len++;
                }
        }
        printk(KERN_INFO "Freeing %lx-%lx pfn range: %lu pages freed\n",
               start, end, len);

        return len;
}
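
/*
 * Release any memory below max_pfn that falls in a gap of the E820
 * map: the hypervisor populated those pages, but the map says the
 * kernel may not use them.
 */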
static unsigned long __init xen_return_unused_memory(
        unsigned long max_pfn, const struct e820entry *map, int nr_map)
{
        phys_addr_t max_addr = PFN_PHYS(max_pfn);
        phys_addr_t last_end = ISA_END_ADDRESS;
        unsigned long released = 0;
        int i;

        /* Free any unused memory above the low 1Mbyte. */
        for (i = 0; i < nr_map && last_end < max_addr; i++) {
                phys_addr_t end = map[i].addr;
                end = min(max_addr, end);

                if (last_end < end)
                        released += xen_release_chunk(last_end, end);
                last_end = max(last_end, map[i].addr + map[i].size);
        }

        if (last_end < max_addr)
                released += xen_release_chunk(last_end, max_addr);

        printk(KERN_INFO "released %lu pages of unused memory\n", released);
        return released;
}
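
/*
 * Mark every PFN not covered by a RAM or UNUSABLE E820 entry as
 * identity-mapped (PFN == MFN) in the P2M, so MMIO holes and
 * reserved ranges are passed through 1:1.  Dom0 scans from address
 * 0; domU starts above the ISA region, which is normal RAM there.
 */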
static unsigned long __init xen_set_identity(const struct e820entry *list,
                                             ssize_t map_size)
{
        phys_addr_t last = xen_initial_domain() ? 0 : ISA_END_ADDRESS;
        phys_addr_t start_pci = last;
        const struct e820entry *entry;
        unsigned long identity = 0;
        int i;

        for (i = 0, entry = list; i < map_size; i++, entry++) {
                phys_addr_t start = entry->addr;
                phys_addr_t end = start + entry->size;

                if (start < last)
                        start = last;
                if (end <= start)
                        continue;

                /* Skip over the 1MB region. */
                if (last > end)
                        continue;

                if ((entry->type == E820_RAM) || (entry->type == E820_UNUSABLE)) {
                        if (start > start_pci)
                                identity += set_phys_range_identity(
                                        PFN_UP(start_pci), PFN_DOWN(start));

                        /* Without saving 'last' we would gobble RAM too
                         * at the end of the loop. */
                        last = end;
                        start_pci = end;
                        continue;
                }
                start_pci = min(start, start_pci);
                last = end;
        }
        if (last > start_pci)
                identity += set_phys_range_identity(
                        PFN_UP(start_pci), PFN_DOWN(last));
        return identity;
}
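
/*
 * Ask the hypervisor for this domain's maximum reservation, falling
 * back to (and never exceeding) the compile-time MAX_DOMAIN_PAGES
 * limit if the hypercall fails.
 */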
static unsigned long __init xen_get_max_pages(void)
{
        unsigned long max_pages = MAX_DOMAIN_PAGES;
        domid_t domid = DOMID_SELF;
        int ret;

        ret = HYPERVISOR_memory_op(XENMEM_maximum_reservation, &domid);
        if (ret > 0)
                max_pages = ret;

        return min(max_pages, MAX_DOMAIN_PAGES);
}
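
/*
 * Trim a region to whole pages before adding it to the kernel's
 * E820 table; only RAM is trimmed, other types are added as-is.
 */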
static void xen_align_and_add_e820_region(u64 start, u64 size, int type)
{
        u64 end = start + size;

        /* Align RAM regions to page boundaries. */
        if (type == E820_RAM) {
                start = PAGE_ALIGN(start);
                end &= ~((u64)PAGE_SIZE - 1);
        }

        e820_add_region(start, end - start, type);
}
/**
 * machine_specific_memory_setup - Hook for machine specific memory setup.
 **/
char * __init xen_memory_setup(void)
{
        static struct e820entry map[E820MAX] __initdata;

        unsigned long max_pfn = xen_start_info->nr_pages;
        unsigned long long mem_end;
        int rc;
        struct xen_memory_map memmap;
        unsigned long max_pages;
        unsigned long extra_pages = 0;
        unsigned long identity_pages = 0;
        int i;
        int op;

        max_pfn = min(MAX_DOMAIN_PAGES, max_pfn);
        mem_end = PFN_PHYS(max_pfn);

        memmap.nr_entries = E820MAX;
        set_xen_guest_handle(memmap.buffer, map);

        op = xen_initial_domain() ?
                XENMEM_machine_memory_map :
                XENMEM_memory_map;
        rc = HYPERVISOR_memory_op(op, &memmap);
        if (rc == -ENOSYS) {
                BUG_ON(xen_initial_domain());
                memmap.nr_entries = 1;
                map[0].addr = 0ULL;
                map[0].size = mem_end;
                /* 8MB slack (to balance backend allocations). */
                map[0].size += 8ULL << 20;
                map[0].type = E820_RAM;
                rc = 0;
        }
        BUG_ON(rc);

        /* Make sure the Xen-supplied memory map is well-ordered. */
        sanitize_e820_map(map, memmap.nr_entries, &memmap.nr_entries);

        max_pages = xen_get_max_pages();
        if (max_pages > max_pfn)
                extra_pages += max_pages - max_pfn;

        xen_released_pages = xen_return_unused_memory(max_pfn, map,
                                                      memmap.nr_entries);
        extra_pages += xen_released_pages;

        /*
         * Clamp the amount of extra memory to EXTRA_MEM_RATIO times
         * the base size.  On non-highmem systems, the base size is
         * the full initial memory allocation; on highmem it is
         * limited to the max size of lowmem, so that it doesn't get
         * completely filled.
         *
         * In principle there could be a problem in lowmem systems if
         * the initial memory is also very large with respect to
         * lowmem, but we won't try to deal with that here.
         */
        extra_pages = min(EXTRA_MEM_RATIO * min(max_pfn, PFN_DOWN(MAXMEM)),
                          extra_pages);

        i = 0;
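        /*
         * Walk the map, consuming each entry piecewise: RAM below
         * mem_end is kept, RAM above it becomes ballooning space via
         * xen_add_extra_mem() while the extra_pages budget lasts, and
         * any remainder is marked E820_UNUSABLE.
         */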
        while (i < memmap.nr_entries) {
                u64 addr = map[i].addr;
                u64 size = map[i].size;
                u32 type = map[i].type;

                if (type == E820_RAM) {
                        if (addr < mem_end) {
                                size = min(size, mem_end - addr);
                        } else if (extra_pages) {
                                size = min(size, (u64)extra_pages * PAGE_SIZE);
                                extra_pages -= size / PAGE_SIZE;
                                xen_add_extra_mem(addr, size);
                        } else
                                type = E820_UNUSABLE;
                }

                xen_align_and_add_e820_region(addr, size, type);

                map[i].addr += size;
                map[i].size -= size;
                if (map[i].size == 0)
                        i++;
        }

        /*
         * In domU, the ISA region is normal, usable memory, but we
         * reserve ISA memory anyway because too many things poke
         * about in there.
         *
         * In Dom0, the host E820 information can leave gaps in the
         * ISA range, which would cause us to release those pages.  To
         * avoid this, we unconditionally reserve them here.
         */
        e820_add_region(ISA_START_ADDRESS, ISA_END_ADDRESS - ISA_START_ADDRESS,
                        E820_RESERVED);

        /*
         * Reserve Xen bits:
         *  - mfn_list
         *  - xen_start_info
         * See comment above "struct start_info" in <xen/interface/xen.h>
         */
        memblock_x86_reserve_range(__pa(xen_start_info->mfn_list),
                                   __pa(xen_start_info->pt_base),
                                   "XEN START INFO");

        sanitize_e820_map(e820.map, ARRAY_SIZE(e820.map), &e820.nr_map);

        /*
         * Set P2M for all non-RAM pages and E820 gaps to be identity
         * type PFNs.
         */
        identity_pages = xen_set_identity(e820.map, e820.nr_map);
        printk(KERN_INFO "Set %ld page(s) to 1-1 mapping.\n", identity_pages);

        return "Xen";
}
/*
 * Set the bit indicating "nosegneg" library variants should be used.
 * We only need to bother in pure 32-bit mode; compat 32-bit processes
 * can have un-truncated segments, so wrapping around is allowed.
 */
static void __init fiddle_vdso(void)
{
#ifdef CONFIG_X86_32
        u32 *mask;

        mask = VDSO32_SYMBOL(&vdso32_int80_start, NOTE_MASK);
        *mask |= 1 << VDSO_NOTE_NONEGSEG_BIT;
        mask = VDSO32_SYMBOL(&vdso32_sysenter_start, NOTE_MASK);
        *mask |= 1 << VDSO_NOTE_NONEGSEG_BIT;
#endif
}
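
/*
 * Register an entry point with the hypervisor.  CALLBACKF_mask_events
 * keeps event delivery masked while the callback runs.
 */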
static int __cpuinit register_callback(unsigned type, const void *func)
{
        struct callback_register callback = {
                .type = type,
                .address = XEN_CALLBACK(__KERNEL_CS, func),
                .flags = CALLBACKF_mask_events,
        };

        return HYPERVISOR_callback_op(CALLBACKOP_register, &callback);
}
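
/*
 * Route sysenter through Xen; if registration fails, clear the
 * feature bit so the vDSO falls back to int $0x80.
 */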
void __cpuinit xen_enable_sysenter(void)
{
        int ret;
        unsigned sysenter_feature;

#ifdef CONFIG_X86_32
        sysenter_feature = X86_FEATURE_SEP;
#else
        sysenter_feature = X86_FEATURE_SYSENTER32;
#endif

        if (!boot_cpu_has(sysenter_feature))
                return;

        ret = register_callback(CALLBACKTYPE_sysenter, xen_sysenter_target);
        if (ret != 0)
                setup_clear_cpu_cap(sysenter_feature);
}
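
/*
 * 64-bit only: register the syscall entry point, and the compat
 * syscall32 entry point when the CPU supports it.
 */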
void __cpuinit xen_enable_syscall(void)
{
#ifdef CONFIG_X86_64
        int ret;

        ret = register_callback(CALLBACKTYPE_syscall, xen_syscall_target);
        if (ret != 0) {
                printk(KERN_ERR "Failed to set syscall callback: %d\n", ret);
                /* Pretty fatal; 64-bit userspace has no other
                   mechanism for syscalls. */
        }

        if (boot_cpu_has(X86_FEATURE_SYSCALL32)) {
                ret = register_callback(CALLBACKTYPE_syscall32,
                                        xen_syscall32_target);
                if (ret != 0)
                        setup_clear_cpu_cap(X86_FEATURE_SYSCALL32);
        }
#endif /* CONFIG_X86_64 */
}
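
/*
 * Main Xen-specific machine setup: enable the VM assists we rely on,
 * install the hypervisor callbacks, and force the idle loop onto the
 * paravirtualized halt path.
 */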
void __init xen_arch_setup(void)
{
        xen_panic_handler_init();

        HYPERVISOR_vm_assist(VMASST_CMD_enable, VMASST_TYPE_4gb_segments);
        HYPERVISOR_vm_assist(VMASST_CMD_enable, VMASST_TYPE_writable_pagetables);

        if (!xen_feature(XENFEAT_auto_translated_physmap))
                HYPERVISOR_vm_assist(VMASST_CMD_enable,
                                     VMASST_TYPE_pae_extended_cr3);

        if (register_callback(CALLBACKTYPE_event, xen_hypervisor_callback) ||
            register_callback(CALLBACKTYPE_failsafe, xen_failsafe_callback))
                BUG();

        xen_enable_sysenter();
        xen_enable_syscall();

#ifdef CONFIG_ACPI
        if (!(xen_start_info->flags & SIF_INITDOMAIN)) {
                printk(KERN_INFO "ACPI in unprivileged domain disabled\n");
                disable_acpi();
        }
#endif

        memcpy(boot_command_line, xen_start_info->cmd_line,
               MAX_GUEST_CMDLINE > COMMAND_LINE_SIZE ?
               COMMAND_LINE_SIZE : MAX_GUEST_CMDLINE);

        /* Set up idle, making sure it calls safe_halt() pvop */
#ifdef CONFIG_X86_32
        boot_cpu_data.hlt_works_ok = 1;
#endif
        disable_cpuidle();
        boot_option_idle_override = IDLE_HALT;

        fiddle_vdso();
}