/*
 * Machine specific setup for xen
 *
 * Jeremy Fitzhardinge <jeremy@xensource.com>, XenSource Inc, 2007
 */
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/pm.h>
#include <linux/memblock.h>
#include <linux/cpuidle.h>

#include <asm/elf.h>
#include <asm/vdso.h>
#include <asm/e820.h>
#include <asm/setup.h>
#include <asm/acpi.h>
#include <asm/xen/hypervisor.h>
#include <asm/xen/hypercall.h>

#include <xen/xen.h>
#include <xen/page.h>
#include <xen/interface/callback.h>
#include <xen/interface/memory.h>
#include <xen/interface/physdev.h>
#include <xen/features.h>

#include "xen-ops.h"
#include "vdso.h"
/* These are code, but not functions.  Defined in entry.S */
extern const char xen_hypervisor_callback[];
extern const char xen_failsafe_callback[];
extern void xen_sysenter_target(void);
extern void xen_syscall_target(void);
extern void xen_syscall32_target(void);
/* Amount of extra memory space we add to the e820 ranges */
struct xen_memory_region xen_extra_mem[XEN_EXTRA_MEM_MAX_REGIONS] __initdata;

/* Number of pages released from the initial allocation. */
unsigned long xen_released_pages;
/*
 * The maximum amount of extra memory compared to the base size.  The
 * main scaling factor is the size of struct page.  At extreme ratios
 * of base:extra, all the base memory can be filled with page
 * structures for the extra memory, leaving no space for anything
 * else.
 *
 * 10x seems like a reasonable balance between scaling flexibility and
 * leaving a practically usable system.
 */
#define EXTRA_MEM_RATIO		(10)
static void __init xen_add_extra_mem(u64 start, u64 size)
{
	unsigned long pfn;
	int i;

	for (i = 0; i < XEN_EXTRA_MEM_MAX_REGIONS; i++) {
		/* Add new region. */
		if (xen_extra_mem[i].size == 0) {
			xen_extra_mem[i].start = start;
			xen_extra_mem[i].size = size;
			break;
		}
		/* Append to existing region. */
		if (xen_extra_mem[i].start + xen_extra_mem[i].size == start) {
			xen_extra_mem[i].size += size;
			break;
		}
	}
	if (i == XEN_EXTRA_MEM_MAX_REGIONS)
		printk(KERN_WARNING "Warning: not enough extra memory regions\n");

	memblock_x86_reserve_range(start, start + size, "XEN EXTRA");
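	/*
	 * Grow the p2m limit to cover the new region and invalidate any
	 * existing p2m entries within it: the range has no backing
	 * frames until the balloon driver populates it.
	 */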
	xen_max_p2m_pfn = PFN_DOWN(start + size);
	for (pfn = PFN_DOWN(start); pfn < xen_max_p2m_pfn; pfn++) {
		unsigned long mfn = pfn_to_mfn(pfn);

		if (WARN(mfn == pfn, "Trying to over-write 1-1 mapping (pfn: %lx)\n", pfn))
			continue;
		WARN(mfn != INVALID_P2M_ENTRY, "Trying to remove %lx which has %lx mfn!\n",
			pfn, mfn);

		__set_phys_to_machine(pfn, INVALID_P2M_ENTRY);
	}
}
static unsigned long __init xen_release_chunk(unsigned long start,
					      unsigned long end)
{
	struct xen_memory_reservation reservation = {
		.address_bits = 0,
		.extent_order = 0,
		.domid        = DOMID_SELF
	};
	unsigned long len = 0;
	unsigned long pfn;
	int ret;

	for (pfn = start; pfn < end; pfn++) {
		unsigned long mfn = pfn_to_mfn(pfn);

		/* Make sure pfn exists to start with */
		if (mfn == INVALID_P2M_ENTRY || mfn_to_pfn(mfn) != pfn)
			continue;

		set_xen_guest_handle(reservation.extent_start, &mfn);
		reservation.nr_extents = 1;
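		/*
		 * Hand the single frame back to the hypervisor; on
		 * success Xen owns it again and the page can later be
		 * repopulated (e.g. by the balloon driver).
		 */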
		ret = HYPERVISOR_memory_op(XENMEM_decrease_reservation,
					   &reservation);
		WARN(ret != 1, "Failed to release pfn %lx err=%d\n", pfn, ret);
		if (ret == 1) {
			__set_phys_to_machine(pfn, INVALID_P2M_ENTRY);
			len++;
		}
	}
	printk(KERN_INFO "Freeing %lx-%lx pfn range: %lu pages freed\n",
	       start, end, len);

	return len;
}
static unsigned long __init xen_set_identity_and_release(
	const struct e820entry *list, size_t map_size, unsigned long nr_pages)
{
	phys_addr_t start = 0;
	unsigned long released = 0;
	unsigned long identity = 0;
	const struct e820entry *entry;
	int i;
	/*
	 * Combine non-RAM regions and gaps until a RAM region (or the
	 * end of the map) is reached, then set the 1:1 map and
	 * release the pages (if available) in those non-RAM regions.
	 *
	 * The combined non-RAM regions are rounded to a whole number
	 * of pages so any partial pages are accessible via the 1:1
	 * mapping.  This is needed for some BIOSes that put (for
	 * example) the DMI tables in a reserved region that begins on
	 * a non-page boundary.
	 */
	for (i = 0, entry = list; i < map_size; i++, entry++) {
		phys_addr_t end = entry->addr + entry->size;

		if (entry->type == E820_RAM || i == map_size - 1) {
			unsigned long start_pfn = PFN_DOWN(start);
			unsigned long end_pfn = PFN_UP(end);

			/* A RAM region ends the accumulated non-RAM
			   span just before the region begins. */
			if (entry->type == E820_RAM)
				end_pfn = PFN_UP(entry->addr);

			if (start_pfn < end_pfn) {
				if (start_pfn < nr_pages)
					released += xen_release_chunk(
						start_pfn, min(end_pfn, nr_pages));

				identity += set_phys_range_identity(
					start_pfn, end_pfn);
			}
			start = end;
		}
	}
	printk(KERN_INFO "Released %lu pages of unused memory\n", released);
	printk(KERN_INFO "Set %ld page(s) to 1-1 mapping\n", identity);

	return released;
}
static unsigned long __init xen_get_max_pages(void)
{
	unsigned long max_pages = MAX_DOMAIN_PAGES;
	domid_t domid = DOMID_SELF;
	int ret;
	/*
	 * For the initial domain we use the maximum reservation as
	 * the maximum page.
	 *
	 * For guest domains the current maximum reservation reflects
	 * the current maximum rather than the static maximum. In this
	 * case the e820 map provided to us will cover the static
	 * maximum region.
	 */
	if (xen_initial_domain()) {
		ret = HYPERVISOR_memory_op(XENMEM_maximum_reservation, &domid);
		if (ret > 0)
			max_pages = ret;
	}

	return min(max_pages, MAX_DOMAIN_PAGES);
}
static void xen_align_and_add_e820_region(u64 start, u64 size, int type)
{
	u64 end = start + size;

	/* Align RAM regions to page boundaries, shrinking them inward:
	   a partial page of RAM is unusable anyway. */
	if (type == E820_RAM) {
		start = PAGE_ALIGN(start);
		end &= ~((u64)PAGE_SIZE - 1);
	}

	e820_add_region(start, end - start, type);
}
void xen_ignore_unusable(struct e820entry *list, size_t map_size)
{
	struct e820entry *entry;
	unsigned int i;

	for (i = 0, entry = list; i < map_size; i++, entry++) {
		if (entry->type == E820_UNUSABLE)
			entry->type = E820_RAM;
	}
}
/**
 * machine_specific_memory_setup - Hook for machine specific memory setup.
 **/
char * __init xen_memory_setup(void)
{
	static struct e820entry map[E820MAX] __initdata;

	unsigned long max_pfn = xen_start_info->nr_pages;
	unsigned long long mem_end;
	int rc;
	struct xen_memory_map memmap;
	unsigned long max_pages;
	unsigned long extra_pages = 0;
	int i;
	int op;
	max_pfn = min(MAX_DOMAIN_PAGES, max_pfn);
	mem_end = PFN_PHYS(max_pfn);

	memmap.nr_entries = E820MAX;
	set_xen_guest_handle(memmap.buffer, map);
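	/*
	 * The initial domain reads the real (machine) e820; unprivileged
	 * domains get the pseudo-physical map set up by the domain
	 * builder.
	 */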
	op = xen_initial_domain() ?
		XENMEM_machine_memory_map :
		XENMEM_memory_map;
	rc = HYPERVISOR_memory_op(op, &memmap);
	if (rc == -ENOSYS) {
		/* No memory map from the hypervisor: synthesize a single
		   RAM region from the initial page count. */
		BUG_ON(xen_initial_domain());
		memmap.nr_entries = 1;
		map[0].addr = 0ULL;
		map[0].size = mem_end;
		/* 8MB slack (to balance backend allocations). */
		map[0].size += 8ULL << 20;
		map[0].type = E820_RAM;
		rc = 0;
	}
	BUG_ON(rc);
	/*
	 * Xen won't allow a 1:1 mapping to be created to UNUSABLE
	 * regions, so if we're using the machine memory map leave the
	 * region as RAM as it is in the pseudo-physical map.
	 *
	 * UNUSABLE regions in domUs are not handled and will need
	 * a patch in the future.
	 */
	if (xen_initial_domain())
		xen_ignore_unusable(map, memmap.nr_entries);
	/* Make sure the Xen-supplied memory map is well-ordered. */
	sanitize_e820_map(map, ARRAY_SIZE(map), &memmap.nr_entries);
	max_pages = xen_get_max_pages();
	if (max_pages > max_pfn)
		extra_pages += max_pages - max_pfn;
	/*
	 * Set P2M for all non-RAM pages and E820 gaps to be identity
	 * type PFNs.  Any RAM pages that would be made inaccessible by
	 * this are first released.
	 */
	xen_released_pages = xen_set_identity_and_release(
		map, memmap.nr_entries, max_pfn);
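	/*
	 * Released pages go back to the hypervisor, but they can be
	 * repopulated later (e.g. by the balloon driver), so they count
	 * toward the extra-memory budget.
	 */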
	extra_pages += xen_released_pages;
	/*
	 * Clamp the amount of extra memory to an EXTRA_MEM_RATIO
	 * factor of the base size.  On non-highmem systems, the base
	 * size is the full initial memory allocation; on highmem it
	 * is limited to the max size of lowmem, so that it doesn't
	 * get completely filled.
	 *
	 * In principle there could be a problem in lowmem systems if
	 * the initial memory is also very large with respect to
	 * lowmem, but we won't try to deal with that here.
	 */
	extra_pages = min(EXTRA_MEM_RATIO * min(max_pfn, PFN_DOWN(MAXMEM)),
			  extra_pages);
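	/*
	 * For example, a domain booted with a 512 MiB initial
	 * allocation (and lowmem at least that large) is limited to
	 * 10 * 512 MiB = 5 GiB of extra space.
	 */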
	i = 0;
	while (i < memmap.nr_entries) {
		u64 addr = map[i].addr;
		u64 size = map[i].size;
		u32 type = map[i].type;

		if (type == E820_RAM) {
			if (addr < mem_end) {
				/* RAM backed by the initial allocation. */
				size = min(size, mem_end - addr);
			} else if (extra_pages) {
				/* RAM beyond it becomes an extra region. */
				size = min(size, (u64)extra_pages * PAGE_SIZE);
				extra_pages -= size / PAGE_SIZE;
				xen_add_extra_mem(addr, size);
			} else
				type = E820_UNUSABLE;
		}

		xen_align_and_add_e820_region(addr, size, type);

		/* Consume the processed part; advance once the entry
		   is exhausted. */
		map[i].addr += size;
		map[i].size -= size;
		if (map[i].size == 0)
			i++;
	}
	/*
	 * In domU, the ISA region is normal, usable memory, but we
	 * reserve ISA memory anyway because too many things poke
	 * about in there.
	 */
	e820_add_region(ISA_START_ADDRESS, ISA_END_ADDRESS - ISA_START_ADDRESS,
			E820_RESERVED);
	/*
	 * Reserve Xen bits:
	 *  - mfn_list
	 *  - xen_start_info
	 * See comment above "struct start_info" in <xen/interface/xen.h>
	 */
	memblock_x86_reserve_range(__pa(xen_start_info->mfn_list),
				   __pa(xen_start_info->pt_base),
				   "XEN START INFO");

	sanitize_e820_map(e820.map, ARRAY_SIZE(e820.map), &e820.nr_map);

	return "Xen";
}
/*
 * Set the bit indicating "nosegneg" library variants should be used.
 * We only need to bother in pure 32-bit mode; compat 32-bit processes
 * can have un-truncated segments, so wrapping around is allowed.
 */
static void __init fiddle_vdso(void)
{
#ifdef CONFIG_X86_32
	u32 *mask;
	mask = VDSO32_SYMBOL(&vdso32_int80_start, NOTE_MASK);
	*mask |= 1 << VDSO_NOTE_NONEGSEG_BIT;
	mask = VDSO32_SYMBOL(&vdso32_sysenter_start, NOTE_MASK);
	*mask |= 1 << VDSO_NOTE_NONEGSEG_BIT;
#endif
}
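/*
 * Register an entry point with the hypervisor.  CALLBACKF_mask_events
 * asks Xen to deliver the callback with event delivery masked, much as
 * a native interrupt arrives with IRQs disabled.
 */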
static int __cpuinit register_callback(unsigned type, const void *func)
{
	struct callback_register callback = {
		.type = type,
		.address = XEN_CALLBACK(__KERNEL_CS, func),
		.flags = CALLBACKF_mask_events,
	};

	return HYPERVISOR_callback_op(CALLBACKOP_register, &callback);
}
void __cpuinit xen_enable_sysenter(void)
{
	int ret;
	unsigned sysenter_feature;

#ifdef CONFIG_X86_32
	sysenter_feature = X86_FEATURE_SEP;
#else
	sysenter_feature = X86_FEATURE_SYSENTER32;
#endif

	if (!boot_cpu_has(sysenter_feature))
		return;

	ret = register_callback(CALLBACKTYPE_sysenter, xen_sysenter_target);
	if (ret != 0)
		setup_clear_cpu_cap(sysenter_feature);
}
void __cpuinit xen_enable_syscall(void)
{
#ifdef CONFIG_X86_64
	int ret;

	ret = register_callback(CALLBACKTYPE_syscall, xen_syscall_target);
	if (ret != 0) {
		printk(KERN_ERR "Failed to set syscall callback: %d\n", ret);
		/* Pretty fatal; 64-bit userspace has no other
		   mechanism for syscalls. */
	}

	if (boot_cpu_has(X86_FEATURE_SYSCALL32)) {
		ret = register_callback(CALLBACKTYPE_syscall32,
					xen_syscall32_target);
		if (ret != 0)
			setup_clear_cpu_cap(X86_FEATURE_SYSCALL32);
	}
#endif /* CONFIG_X86_64 */
}
void __init xen_arch_setup(void)
{
	xen_panic_handler_init();
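	/*
	 * VM assists tweak how Xen virtualizes this guest: 4gb_segments
	 * emulates accesses beyond truncated segment limits, and
	 * writable_pagetables lets the kernel write page tables
	 * directly, with the hypervisor trapping and validating the
	 * updates.
	 */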
	HYPERVISOR_vm_assist(VMASST_CMD_enable, VMASST_TYPE_4gb_segments);
	HYPERVISOR_vm_assist(VMASST_CMD_enable, VMASST_TYPE_writable_pagetables);

	if (!xen_feature(XENFEAT_auto_translated_physmap))
		HYPERVISOR_vm_assist(VMASST_CMD_enable,
				     VMASST_TYPE_pae_extended_cr3);

	if (register_callback(CALLBACKTYPE_event, xen_hypervisor_callback) ||
	    register_callback(CALLBACKTYPE_failsafe, xen_failsafe_callback))
		BUG();

	xen_enable_sysenter();
	xen_enable_syscall();
#ifdef CONFIG_ACPI
	if (!(xen_start_info->flags & SIF_INITDOMAIN)) {
		printk(KERN_INFO "ACPI in unprivileged domain disabled\n");
		disable_acpi();
	}
#endif

	/* Copy the guest command line, clamped to the smaller of the
	   two buffer sizes. */
	memcpy(boot_command_line, xen_start_info->cmd_line,
	       MAX_GUEST_CMDLINE > COMMAND_LINE_SIZE ?
	       COMMAND_LINE_SIZE : MAX_GUEST_CMDLINE);
	/* Set up idle, making sure it calls safe_halt() pvop */
#ifdef CONFIG_X86_32
	boot_cpu_data.hlt_works_ok = 1;
#endif
	disable_cpuidle();
	boot_option_idle_override = IDLE_HALT;
	WARN_ON(set_pm_idle_to_default());

	fiddle_vdso();
}