depends on X86_LOCAL_APIC && !X86_VISWS
default y
+config DOUBLEFAULT
+ default y
+ bool "Enable doublefault exception handler" if EMBEDDED
+ help
+ This option allows trapping of rare doublefault exceptions that
+ would otherwise cause a system to silently reboot. Disabling this
+ option saves about 4k and might cause you much additional grey
+ hair.
+
endmenu
unsigned long i;
int config_size;
- if (!phys_addr || !size || !cpu_has_apic)
+ if (!phys_addr || !size)
return -EINVAL;
mcfg = (struct acpi_table_mcfg *)__acpi_map_table(phys_addr, size);
dmi_check_system(acpi_dmi_table);
#endif
+ if (!cpu_has_apic)
+ return -ENODEV;
+
/*
* If acpi_disabled, bail out
* One exception: acpi=ht continues far enough to enumerate LAPICs
select U3_DART
select MPIC_BROKEN_U3
select GENERIC_TBSYNC
+ select PPC_970_NAP
default y
config PPC_MAPLE
select MPIC_BROKEN_U3
select GENERIC_TBSYNC
select PPC_UDBG_16550
+ select PPC_970_NAP
default n
help
This option enables support for the Maple 970FX Evaluation Board.
bool
default n
+config PPC_970_NAP
+ bool
+ default n
+
source "drivers/cpufreq/Kconfig"
config CPU_FREQ_PMAC
CFLAGS += -mstring
endif
+ifeq ($(CONFIG_6xx),y)
+CFLAGS += -mcpu=powerpc
+endif
+
cpu-as-$(CONFIG_PPC64BRIDGE) += -Wa,-mppc64bridge
cpu-as-$(CONFIG_4xx) += -Wa,-m405
cpu-as-$(CONFIG_6xx) += -Wa,-maltivec
firmware.o sysfs.o
obj-$(CONFIG_PPC64) += vdso64/
obj-$(CONFIG_ALTIVEC) += vecemu.o vector.o
-obj-$(CONFIG_POWER4) += idle_power4.o
+obj-$(CONFIG_PPC_970_NAP) += idle_power4.o
obj-$(CONFIG_PPC_OF) += of_device.o prom_parse.o
procfs-$(CONFIG_PPC64) := proc_ppc64.o
obj-$(CONFIG_PROC_FS) += $(procfs-y)
#endif /* CONFIG_PPC64 */
DEFINE(TI_FLAGS, offsetof(struct thread_info, flags));
+ DEFINE(TI_LOCAL_FLAGS, offsetof(struct thread_info, local_flags));
DEFINE(TI_PREEMPT, offsetof(struct thread_info, preempt_count));
DEFINE(TI_TASK, offsetof(struct thread_info, task));
#ifdef CONFIG_PPC32
stw r12,4(r11)
#endif
b 3f
+
2: /* if from kernel, check interrupted DOZE/NAP mode and
* check for stack overflow
*/
+ lwz r9,THREAD_INFO-THREAD(r12)
+ cmplw r1,r9 /* if r1 <= current->thread_info */
+ ble- stack_ovf /* then the kernel stack overflowed */
+5:
#ifdef CONFIG_6xx
- mfspr r11,SPRN_HID0
- mtcr r11
-BEGIN_FTR_SECTION
- bt- 8,4f /* Check DOZE */
-END_FTR_SECTION_IFSET(CPU_FTR_CAN_DOZE)
-BEGIN_FTR_SECTION
- bt- 9,4f /* Check NAP */
-END_FTR_SECTION_IFSET(CPU_FTR_CAN_NAP)
+ tophys(r9,r9) /* check local flags */
+ lwz r12,TI_LOCAL_FLAGS(r9)
+ mtcrf 0x01,r12
+ bt- 31-TLF_NAPPING,4f
#endif /* CONFIG_6xx */
.globl transfer_to_handler_cont
transfer_to_handler_cont:
- lwz r11,THREAD_INFO-THREAD(r12)
- cmplw r1,r11 /* if r1 <= current->thread_info */
- ble- stack_ovf /* then the kernel stack overflowed */
3:
mflr r9
lwz r11,0(r9) /* virtual address of handler */
lwz r9,4(r9) /* where to go when done */
- FIX_SRR1(r10,r12)
mtspr SPRN_SRR0,r11
mtspr SPRN_SRR1,r10
mtlr r9
SYNC
RFI /* jump to handler, enable MMU */
-#ifdef CONFIG_6xx
-4: b power_save_6xx_restore
+#ifdef CONFIG_6xx
+4: rlwinm r12,r12,0,~_TLF_NAPPING
+ stw r12,TI_LOCAL_FLAGS(r9)
+ b power_save_6xx_restore
#endif
/*
*/
stack_ovf:
/* sometimes we use a statically-allocated stack, which is OK. */
- lis r11,_end@h
- ori r11,r11,_end@l
- cmplw r1,r11
- ble 3b /* r1 <= &_end is OK */
+ lis r12,_end@h
+ ori r12,r12,_end@l
+ cmplw r1,r12
+ ble 5b /* r1 <= &_end is OK */
SAVE_NVGPRS(r11)
addi r3,r1,STACK_FRAME_OVERHEAD
lis r1,init_thread_union@ha
bl hdlr; \
b .ret_from_except
+/*
+ * Like STD_EXCEPTION_COMMON, but for exceptions that can occur
+ * in the idle task and therefore need the special idle handling.
+ */
+#define STD_EXCEPTION_COMMON_IDLE(trap, label, hdlr) \
+ .align 7; \
+ .globl label##_common; \
+label##_common: \
+ EXCEPTION_PROLOG_COMMON(trap, PACA_EXGEN); \
+ FINISH_NAP; \
+ DISABLE_INTS; \
+ bl .save_nvgprs; \
+ addi r3,r1,STACK_FRAME_OVERHEAD; \
+ bl hdlr; \
+ b .ret_from_except
+
#define STD_EXCEPTION_COMMON_LITE(trap, label, hdlr) \
.align 7; \
.globl label##_common; \
label##_common: \
EXCEPTION_PROLOG_COMMON(trap, PACA_EXGEN); \
+ FINISH_NAP; \
DISABLE_INTS; \
bl .ppc64_runlatch_on; \
addi r3,r1,STACK_FRAME_OVERHEAD; \
bl hdlr; \
b .ret_from_except_lite
+/*
+ * When the idle code in power4_idle puts the CPU into NAP mode,
+ * it has to do so in a loop, and relies on the external interrupt
+ * and decrementer interrupt entry code to get it out of the loop.
+ * It sets the _TLF_NAPPING bit in current_thread_info()->local_flags
+ * to signal that it is in the loop and needs help to get out.
+ */
+#ifdef CONFIG_PPC_970_NAP
+#define FINISH_NAP \
+BEGIN_FTR_SECTION \
+ clrrdi r11,r1,THREAD_SHIFT; \
+ ld r9,TI_LOCAL_FLAGS(r11); \
+ andi. r10,r9,_TLF_NAPPING; \
+ bnel power4_fixup_nap; \
+END_FTR_SECTION_IFSET(CPU_FTR_CAN_NAP)
+#else
+#define FINISH_NAP
+#endif
+
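[Annotation: the handshake FINISH_NAP implements can be sketched in C-like pseudocode. This is an illustration only; nap() and the names other than _TLF_NAPPING are invented here.]

	/* idle side (power4_idle / ppc6xx_idle): */
	current_thread_info()->local_flags |= _TLF_NAPPING;
	for (;;)
		nap();			/* set MSR[POW]; spins until fixed up */

	/* exception entry side (FINISH_NAP -> power4_fixup_nap): */
	if (ti->local_flags & _TLF_NAPPING) {
		ti->local_flags &= ~_TLF_NAPPING;
		regs->nip = regs->link;	/* the "equivalent of a blr" */
	}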
/*
* Start of pSeries system interrupt routines
*/
.globl machine_check_common
machine_check_common:
EXCEPTION_PROLOG_COMMON(0x200, PACA_EXMC)
+ FINISH_NAP
DISABLE_INTS
bl .save_nvgprs
addi r3,r1,STACK_FRAME_OVERHEAD
STD_EXCEPTION_COMMON(0xb00, trap_0b, .unknown_exception)
STD_EXCEPTION_COMMON(0xd00, single_step, .single_step_exception)
STD_EXCEPTION_COMMON(0xe00, trap_0e, .unknown_exception)
- STD_EXCEPTION_COMMON(0xf00, performance_monitor, .performance_monitor_exception)
+ STD_EXCEPTION_COMMON_IDLE(0xf00, performance_monitor, .performance_monitor_exception)
STD_EXCEPTION_COMMON(0x1300, instruction_breakpoint, .instruction_breakpoint_exception)
#ifdef CONFIG_ALTIVEC
STD_EXCEPTION_COMMON(0x1700, altivec_assist, .altivec_assist_exception)
.globl hardware_interrupt_entry
hardware_interrupt_common:
EXCEPTION_PROLOG_COMMON(0x500, PACA_EXGEN)
+ FINISH_NAP
hardware_interrupt_entry:
DISABLE_INTS
bl .ppc64_runlatch_on
bl .do_IRQ
b .ret_from_except_lite
+#ifdef CONFIG_PPC_970_NAP
+power4_fixup_nap:
+ andc r9,r9,r10
+ std r9,TI_LOCAL_FLAGS(r11)
+ ld r10,_LINK(r1) /* make idle task do the */
+ std r10,_NIP(r1) /* equivalent of a blr */
+ blr
+#endif
+
.align 7
.globl alignment_common
alignment_common:
set_thread_flag(TIF_POLLING_NRFLAG);
while (1) {
- ppc64_runlatch_off();
-
while (!need_resched() && !cpu_should_die()) {
+ ppc64_runlatch_off();
+
if (ppc_md.power_save) {
clear_thread_flag(TIF_POLLING_NRFLAG);
/*
#include <asm/ppc_asm.h>
#include <asm/asm-offsets.h>
-#undef DEBUG
-
.text
/*
dcbf 0,r4
dcbf 0,r4
END_FTR_SECTION_IFSET(CPU_FTR_NAP_DISABLE_L2_PR)
-#ifdef DEBUG
- lis r6,nap_enter_count@ha
- lwz r4,nap_enter_count@l(r6)
- addi r4,r4,1
- stw r4,nap_enter_count@l(r6)
-#endif
2:
BEGIN_FTR_SECTION
/* Go to low speed mode on some 750FX */
DSSALL
sync
END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
+ rlwinm r9,r1,0,0,31-THREAD_SHIFT /* current thread_info */
+ lwz r8,TI_LOCAL_FLAGS(r9) /* set napping bit */
+ ori r8,r8,_TLF_NAPPING /* so when we take an exception */
+ stw r8,TI_LOCAL_FLAGS(r9) /* it will return to our caller */
mfmsr r7
ori r7,r7,MSR_EE
oris r7,r7,MSR_POW@h
- sync
- isync
+1: sync
mtmsr r7
isync
- sync
- blr
-
+ b 1b
+
/*
* Return from NAP/DOZE mode, restore some CPU specific registers,
* we are called with DR/IR still off and r2 containing physical
- * address of current.
+ * address of current. R11 points to the exception frame (physical
+ * address). We have to preserve r10.
*/
_GLOBAL(power_save_6xx_restore)
- mfspr r11,SPRN_HID0
- rlwinm. r11,r11,0,10,8 /* Clear NAP & copy NAP bit !state to cr1 EQ */
- cror 4*cr1+eq,4*cr0+eq,4*cr0+eq
-BEGIN_FTR_SECTION
- rlwinm r11,r11,0,9,7 /* Clear DOZE */
-END_FTR_SECTION_IFSET(CPU_FTR_CAN_DOZE)
- mtspr SPRN_HID0, r11
+ lwz r9,_LINK(r11) /* interrupted in ppc6xx_idle: */
+ stw r9,_NIP(r11) /* make it do a blr */
-#ifdef DEBUG
- beq cr1,1f
- lis r11,(nap_return_count-KERNELBASE)@ha
- lwz r9,nap_return_count@l(r11)
- addi r9,r9,1
- stw r9,nap_return_count@l(r11)
-1:
-#endif
-
- rlwinm r9,r1,0,0,18
- tophys(r9,r9)
- lwz r11,TI_CPU(r9)
+#ifdef CONFIG_SMP
+ mfspr r12,SPRN_SPRG3
+ lwz r11,TI_CPU(r12) /* get cpu number * 4 */
slwi r11,r11,2
+#else
+ li r11,0
+#endif
/* Todo make sure all these are in the same page
- * and load r22 (@ha part + CPU offset) only once
+ * and load r11 (@ha part + CPU offset) only once
*/
BEGIN_FTR_SECTION
- beq cr1,1f
+ mfspr r9,SPRN_HID0
+ andis. r9,r9,HID0_NAP@h
+ beq 1f
addis r9,r11,(nap_save_msscr0-KERNELBASE)@ha
lwz r9,nap_save_msscr0@l(r9)
mtspr SPRN_MSSCR0, r9
_GLOBAL(powersave_lowspeed)
.long 0
-
-#ifdef DEBUG
-_GLOBAL(nap_enter_count)
- .space 4
-_GLOBAL(nap_return_count)
- .space 4
-#endif
DSSALL
sync
END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
+ clrrdi r9,r1,THREAD_SHIFT /* current thread_info */
+ ld r8,TI_LOCAL_FLAGS(r9) /* set napping bit */
+ ori r8,r8,_TLF_NAPPING /* so when we take an exception */
+ std r8,TI_LOCAL_FLAGS(r9) /* it will return to our caller */
mfmsr r7
ori r7,r7,MSR_EE
oris r7,r7,MSR_POW@h
- sync
+1: sync
isync
mtmsrd r7
isync
- sync
- blr
+ b 1b
+
* Don't use virtual irqs 0, 1, 2 for devices.
* The pcnet32 driver considers interrupt numbers < 2 to be invalid,
* and 2 is the XICS IPI interrupt.
- * We limit virtual irqs to 17 less than NR_IRQS so that when we
- * offset them by 16 (to reserve the first 16 for ISA interrupts)
- * we don't end up with an interrupt number >= NR_IRQS.
+ * We limit virtual irqs to __irq_offset_value less than virt_irq_max so
+ * that when we offset them we don't end up with an interrupt
+ * number >= virt_irq_max.
*/
#define MIN_VIRT_IRQ 3
-#define MAX_VIRT_IRQ (NR_IRQS - NUM_ISA_INTERRUPTS - 1)
-#define NR_VIRT_IRQS (MAX_VIRT_IRQ - MIN_VIRT_IRQ + 1)
+
+unsigned int virt_irq_max;
+static unsigned int max_virt_irq;
+static unsigned int nr_virt_irqs;
void
virt_irq_init(void)
{
int i;
+
+ if ((virt_irq_max == 0) || (virt_irq_max > (NR_IRQS - 1)))
+ virt_irq_max = NR_IRQS - 1;
+ max_virt_irq = virt_irq_max - __irq_offset_value;
+ nr_virt_irqs = max_virt_irq - MIN_VIRT_IRQ + 1;
+
for (i = 0; i < NR_IRQS; i++)
virt_irq_to_real_map[i] = UNDEFINED_IRQ;
}
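[Annotation: the fold applied below to out-of-range interrupt numbers is plain modular arithmetic; a minimal standalone sketch using the variables initialized above (fold_virt_irq is a hypothetical name).]

	static unsigned int fold_virt_irq(unsigned int real_irq)
	{
		unsigned int virq = real_irq;

		/* nr_virt_irqs == max_virt_irq - MIN_VIRT_IRQ + 1, so the
		 * modulo plus offset always lands in
		 * [MIN_VIRT_IRQ, max_virt_irq].
		 */
		if (virq > max_virt_irq)
			virq = (virq % nr_virt_irqs) + MIN_VIRT_IRQ;
		return virq;
	}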
return real_irq;
}
- /* map to a number between MIN_VIRT_IRQ and MAX_VIRT_IRQ */
+ /* map to a number between MIN_VIRT_IRQ and max_virt_irq */
virq = real_irq;
- if (virq > MAX_VIRT_IRQ)
- virq = (virq % NR_VIRT_IRQS) + MIN_VIRT_IRQ;
+ if (virq > max_virt_irq)
+ virq = (virq % nr_virt_irqs) + MIN_VIRT_IRQ;
/* search for this number or a free slot */
first_virq = virq;
while (virt_irq_to_real_map[virq] != UNDEFINED_IRQ) {
if (virt_irq_to_real_map[virq] == real_irq)
return virq;
- if (++virq > MAX_VIRT_IRQ)
+ if (++virq > max_virt_irq)
virq = MIN_VIRT_IRQ;
if (virq == first_virq)
goto nospace; /* oops, no free slots */
nospace:
if (!warned) {
printk(KERN_CRIT "Interrupt table is full\n");
- printk(KERN_CRIT "Increase NR_IRQS (currently %d) "
- "in your kernel sources and rebuild.\n", NR_IRQS);
+ printk(KERN_CRIT "Increase virt_irq_max (currently %d) "
+ "in your kernel sources and rebuild.\n", virt_irq_max);
warned = 1;
}
return NO_IRQ;
virq = real_irq;
- if (virq > MAX_VIRT_IRQ)
- virq = (virq % NR_VIRT_IRQS) + MIN_VIRT_IRQ;
+ if (virq > max_virt_irq)
+ virq = (virq % nr_virt_irqs) + MIN_VIRT_IRQ;
first_virq = virq;
virq++;
- if (virq >= MAX_VIRT_IRQ)
+ if (virq >= max_virt_irq)
virq = 0;
} while (first_virq != virq);
* non-IBM designs !
* - it has /rtas
*/
- len = prom_getprop(_prom->root, "model",
+ len = prom_getprop(_prom->root, "device_type",
compat, sizeof(compat)-1);
if (len <= 0)
return PLATFORM_GENERIC;
- compat[len] = 0;
- if (strcmp(compat, "chrp"))
+ if (strncmp(compat, RELOC("chrp"), 4))
return PLATFORM_GENERIC;
/* Default to pSeries. We need to know if we are running LPAR */
struct proc_dir_entry *entry;
if (!machine_is(pseries))
- return 1;
+ return -ENODEV;
rtas_node = of_find_node_by_name(NULL, "rtas");
if (rtas_node == NULL)
- return 1;
+ return -ENODEV;
entry = create_proc_entry("ppc64/rtas/progress", S_IRUGO|S_IWUSR, NULL);
if (entry)
cycles_t resume_time = get_cycles();
cycles_t delta_time = resume_time - csa->suspend_time;
- csa->lscsa->decr.slot[0] = delta_time;
+ csa->lscsa->decr.slot[0] -= delta_time;
}
}
extern void chrp_find_bridges(void);
extern void chrp_event_scan(unsigned long);
+extern void chrp_pcibios_fixup(void);
#include <asm/grackle.h>
#include <asm/rtas.h>
+#include "chrp.h"
+
/* LongTrail */
void __iomem *gg2_pci_config_base;
}
/* Do not fixup interrupts from OF tree on pegasos */
- if (is_pegasos == 0)
- ppc_md.pcibios_fixup = chrp_pcibios_fixup;
+ if (is_pegasos)
+ ppc_md.pcibios_fixup = NULL;
}
if (_chrp_type == _CHRP_Pegasos)
ppc_md.get_irq = i8259_irq;
- else
- ppc_md.get_irq = mpic_get_irq;
#if defined(CONFIG_VT) && defined(CONFIG_INPUT_ADBHID) && defined(XMON)
/* see if there is a keyboard in the device tree
/* Assume we have an 8259... */
__irq_offset_value = NUM_ISA_INTERRUPTS;
- ppc_md.setup_arch = chrp_setup_arch;
- ppc_md.show_cpuinfo = chrp_show_cpuinfo;
-
- ppc_md.init_IRQ = chrp_init_IRQ;
- ppc_md.init = chrp_init2;
-
- ppc_md.phys_mem_access_prot = pci_phys_mem_access_prot;
-
- ppc_md.restart = rtas_restart;
- ppc_md.power_off = rtas_power_off;
- ppc_md.halt = rtas_halt;
-
- ppc_md.time_init = chrp_time_init;
- ppc_md.calibrate_decr = generic_calibrate_decr;
-
- /* this may get overridden with rtas routines later... */
- ppc_md.set_rtc_time = chrp_set_rtc_time;
- ppc_md.get_rtc_time = chrp_get_rtc_time;
-
-#ifdef CONFIG_SMP
- smp_ops = &chrp_smp_ops;
-#endif /* CONFIG_SMP */
+ return 1;
}
+
+define_machine(chrp) {
+ .name = "CHRP",
+ .probe = chrp_probe,
+ .setup_arch = chrp_setup_arch,
+ .init = chrp_init2,
+ .show_cpuinfo = chrp_show_cpuinfo,
+ .init_IRQ = chrp_init_IRQ,
+ .get_irq = mpic_get_irq,
+ .pcibios_fixup = chrp_pcibios_fixup,
+ .restart = rtas_restart,
+ .power_off = rtas_power_off,
+ .halt = rtas_halt,
+ .time_init = chrp_time_init,
+ .set_rtc_time = chrp_set_rtc_time,
+ .get_rtc_time = chrp_get_rtc_time,
+ .calibrate_decr = generic_calibrate_decr,
+ .phys_mem_access_prot = pci_phys_mem_access_prot,
+};
#include <asm/iseries/hv_lp_event.h>
#include <asm/iseries/lpar_map.h>
#include <asm/udbg.h>
+#include <asm/irq.h>
#include "naca.h"
#include "setup.h"
powerpc_firmware_features |= FW_FEATURE_ISERIES;
powerpc_firmware_features |= FW_FEATURE_LPAR;
+ /*
+ * The Hypervisor only allows us up to 256 interrupt
+ * sources (the irq number is passed in a u8).
+ */
+ virt_irq_max = 255;
+
return 1;
}
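[Annotation: a worked example of how this cap flows through virt_irq_init(), assuming __irq_offset_value == NUM_ISA_INTERRUPTS == 16; numbers for illustration only.]

	/*
	 *   virt_irq_max = 255               (hypervisor passes irqs in a u8)
	 *   max_virt_irq = 255 - 16 = 239    (leave room for the ISA offset)
	 *   nr_virt_irqs = 239 - 3 + 1 = 237 (MIN_VIRT_IRQ == 3)
	 */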
pci_addr_cache_remove_device(dev);
dn = pci_device_to_OF_node(dev);
- PCI_DN(dn)->pcidev = NULL;
- pci_dev_put (dev);
+ if (PCI_DN(dn)->pcidev) {
+ PCI_DN(dn)->pcidev = NULL;
+ pci_dev_put (dev);
+ }
}
void eeh_remove_bus_device(struct pci_dev *dev)
/* No RTAS */
if (rtas_token("event-scan") == RTAS_UNKNOWN_SERVICE) {
printk(KERN_INFO "rtasd: no event-scan on system\n");
- return 1;
+ return -ENODEV;
}
entry = create_proc_entry("ppc64/rtas/error_log", S_IRUSR, NULL);
while (map->bus_id != NULL) {
idx = -1;
s = strrchr(dev->bus_id, '.');
- if (s != NULL)
+ if (s != NULL) {
idx = (int)simple_strtol(s + 1, NULL, 10);
- else
+ len = s - dev->bus_id;
+ } else {
s = dev->bus_id;
-
- len = s - dev->bus_id;
+ len = strlen(dev->bus_id);
+ }
if (!strncmp(dev->bus_id, map->bus_id, len)) {
pdev = container_of(dev, struct platform_device, dev);
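[Annotation: to see why the fix matters, compare the two shapes of dev->bus_id; the ids below are hypothetical.]

	/*
	 *   "uart.2": s = ".2", idx = 2,  len = 4 -> compare "uart" prefix
	 *   "uart"  : no '.',   idx = -1, len = 4 -> compare the whole id
	 * The old else branch left s == dev->bus_id, so len ended up 0 and
	 * the strncmp() below trivially matched the first map entry.
	 */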
#
# Automatically generated make config: don't edit
-# Linux kernel version: 2.6.17-rc1
-# Mon Apr 3 16:11:14 2006
+# Linux kernel version: 2.6.17-rc1-git11
+# Sun Apr 16 07:22:36 2006
#
CONFIG_X86_64=y
CONFIG_64BIT=y
CONFIG_EPOLL=y
CONFIG_SHMEM=y
CONFIG_SLAB=y
+CONFIG_DOUBLEFAULT=y
# CONFIG_TINY_SHMEM is not set
CONFIG_BASE_SMALL=0
# CONFIG_SLOB is not set
CONFIG_PREEMPT_BKL=y
CONFIG_NUMA=y
CONFIG_K8_NUMA=y
+CONFIG_NODES_SHIFT=6
CONFIG_X86_64_ACPI_NUMA=y
CONFIG_NUMA_EMU=y
CONFIG_ARCH_DISCONTIGMEM_ENABLE=y
# CONFIG_SCSI_INIA100 is not set
# CONFIG_SCSI_SYM53C8XX_2 is not set
# CONFIG_SCSI_IPR is not set
-# CONFIG_SCSI_QLOGIC_FC is not set
# CONFIG_SCSI_QLOGIC_1280 is not set
# CONFIG_SCSI_QLA_FC is not set
# CONFIG_SCSI_LPFC is not set
# CONFIG_USB_ACECAD is not set
# CONFIG_USB_KBTAB is not set
# CONFIG_USB_POWERMATE is not set
-# CONFIG_USB_MTOUCH is not set
-# CONFIG_USB_ITMTOUCH is not set
-# CONFIG_USB_EGALAX is not set
+# CONFIG_USB_TOUCHSCREEN is not set
# CONFIG_USB_YEALINK is not set
# CONFIG_USB_XPAD is not set
# CONFIG_USB_ATI_REMOTE is not set
#
# CONFIG_NEW_LEDS is not set
+#
+# LED drivers
+#
+
+#
+# LED Triggers
+#
+
#
# InfiniBand support
#
.quad compat_sys_get_robust_list
.quad sys_splice
.quad sys_sync_file_range
+ .quad sys_tee
ia32_syscall_end:
#include <linux/moduleparam.h>
#include <linux/nmi.h>
#include <linux/kprobes.h>
+#include <linux/kexec.h>
#include <asm/system.h>
#include <asm/uaccess.h>
printk(KERN_ALERT "RIP ");
printk_address(regs->rip);
printk(" RSP <%016lx>\n", regs->rsp);
+ if (kexec_should_crash(current))
+ crash_kexec(regs);
}
void die(const char * str, struct pt_regs * regs, long err)
*/
printk(str, safe_smp_processor_id());
show_registers(regs);
+ if (kexec_should_crash(current))
+ crash_kexec(regs);
if (panic_on_timeout || panic_on_oops)
panic("nmi watchdog");
printk("console shuts up ...\n");
DECLARE_COMPLETION(all_gone);
elv_unregister(&iosched_as);
ioc_gone = &all_gone;
- barrier();
+ /* ioc_gone's update must be visible before reading ioc_count */
+ smp_wmb();
if (atomic_read(&ioc_count))
- complete(ioc_gone);
+ wait_for_completion(ioc_gone);
synchronize_rcu();
kmem_cache_destroy(arq_pool);
}
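[Annotation: the same publish-then-wait idiom recurs in the cfq exit path below. Schematically, with the release side paraphrased from the io-context free path; a sketch, not a verbatim quote.]

	/* exit side: publish the completion, then wait for stragglers */
	ioc_gone = &all_gone;
	smp_wmb();		/* ioc_gone store before ioc_count load */
	if (atomic_read(&ioc_count))
		wait_for_completion(ioc_gone);

	/* release side, per io context, possibly on another CPU: */
	if (atomic_dec_and_test(&ioc_count) && ioc_gone)
		complete(ioc_gone);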
return cfqq;
}
+static void
+cfq_drop_dead_cic(struct io_context *ioc, struct cfq_io_context *cic)
+{
+ read_lock(&cfq_exit_lock);
+ rb_erase(&cic->rb_node, &ioc->cic_root);
+ read_unlock(&cfq_exit_lock);
+ kmem_cache_free(cfq_ioc_pool, cic);
+ atomic_dec(&ioc_count);
+}
+
static struct cfq_io_context *
cfq_cic_rb_lookup(struct cfq_data *cfqd, struct io_context *ioc)
{
- struct rb_node *n = ioc->cic_root.rb_node;
+ struct rb_node *n;
struct cfq_io_context *cic;
- void *key = cfqd;
+ void *k, *key = cfqd;
+restart:
+ n = ioc->cic_root.rb_node;
while (n) {
cic = rb_entry(n, struct cfq_io_context, rb_node);
+ /* ->key must be copied to avoid race with cfq_exit_queue() */
+ k = cic->key;
+ if (unlikely(!k)) {
+ cfq_drop_dead_cic(ioc, cic);
+ goto restart;
+ }
- if (key < cic->key)
+ if (key < k)
n = n->rb_left;
- else if (key > cic->key)
+ else if (key > k)
n = n->rb_right;
else
return cic;
cfq_cic_link(struct cfq_data *cfqd, struct io_context *ioc,
struct cfq_io_context *cic)
{
- struct rb_node **p = &ioc->cic_root.rb_node;
- struct rb_node *parent = NULL;
+ struct rb_node **p;
+ struct rb_node *parent;
struct cfq_io_context *__cic;
-
- read_lock(&cfq_exit_lock);
+ void *k;
cic->ioc = ioc;
cic->key = cfqd;
ioc->set_ioprio = cfq_ioc_set_ioprio;
-
+restart:
+ parent = NULL;
+ p = &ioc->cic_root.rb_node;
while (*p) {
parent = *p;
__cic = rb_entry(parent, struct cfq_io_context, rb_node);
+ /* ->key must be copied to avoid race with cfq_exit_queue() */
+ k = __cic->key;
+ if (unlikely(!k)) {
+ cfq_drop_dead_cic(ioc, __cic);
+ goto restart;
+ }
- if (cic->key < __cic->key)
+ if (cic->key < k)
p = &(*p)->rb_left;
- else if (cic->key > __cic->key)
+ else if (cic->key > k)
p = &(*p)->rb_right;
else
BUG();
}
+ read_lock(&cfq_exit_lock);
rb_link_node(&cic->rb_node, parent, p);
rb_insert_color(&cic->rb_node, &ioc->cic_root);
list_add(&cic->queue_list, &cfqd->cic_list);
DECLARE_COMPLETION(all_gone);
elv_unregister(&iosched_cfq);
ioc_gone = &all_gone;
- barrier();
+ /* ioc_gone's update must be visible before reading ioc_count */
+ smp_wmb();
if (atomic_read(&ioc_count))
- complete(ioc_gone);
+ wait_for_completion(ioc_gone);
synchronize_rcu();
cfq_slab_kill();
}
extern void *drm_realloc(void *oldpt, size_t oldsize, size_t size, int area);
extern void *drm_ioremap(unsigned long offset, unsigned long size,
drm_device_t * dev);
-extern void *drm_ioremap_nocache(unsigned long offset, unsigned long size,
- drm_device_t * dev);
extern void drm_ioremapfree(void *pt, unsigned long size, drm_device_t * dev);
extern DRM_AGP_MEM *drm_alloc_agp(drm_device_t * dev, int pages, u32 type);
map->handle = drm_ioremap(map->offset, map->size, dev);
}
+#if 0
static __inline__ void drm_core_ioremap_nocache(struct drm_map *map,
struct drm_device *dev)
{
map->handle = drm_ioremap_nocache(map->offset, map->size, dev);
}
+#endif /* 0 */
static __inline__ void drm_core_ioremapfree(struct drm_map *map,
struct drm_device *dev)
[DRM_IOCTL_NR(DRM_IOCTL_SET_SAREA_CTX)] = {drm_setsareactx, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY},
[DRM_IOCTL_NR(DRM_IOCTL_GET_SAREA_CTX)] = {drm_getsareactx, DRM_AUTH},
- [DRM_IOCTL_NR(DRM_IOCTL_ADD_CTX)] = {drm_addctx, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY},
- [DRM_IOCTL_NR(DRM_IOCTL_RM_CTX)] = {drm_rmctx, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY},
+ [DRM_IOCTL_NR(DRM_IOCTL_ADD_CTX)] = {drm_addctx, DRM_AUTH|DRM_ROOT_ONLY},
+ [DRM_IOCTL_NR(DRM_IOCTL_RM_CTX)] = {drm_rmctx, DRM_AUTH|DRM_ROOT_ONLY},
[DRM_IOCTL_NR(DRM_IOCTL_MOD_CTX)] = {drm_modctx, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY},
[DRM_IOCTL_NR(DRM_IOCTL_GET_CTX)] = {drm_getctx, DRM_AUTH},
[DRM_IOCTL_NR(DRM_IOCTL_SWITCH_CTX)] = {drm_switchctx, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY},
}
#if __OS_HAS_AGP
+/*
+ * Find the drm_map that covers the range [offset, offset+size).
+ */
+static drm_map_t *drm_lookup_map(unsigned long offset,
+ unsigned long size, drm_device_t * dev)
+{
+ struct list_head *list;
+ drm_map_list_t *r_list;
+ drm_map_t *map;
+
+ list_for_each(list, &dev->maplist->head) {
+ r_list = (drm_map_list_t *) list;
+ map = r_list->map;
+ if (!map)
+ continue;
+ if (map->offset <= offset
+ && (offset + size) <= (map->offset + map->size))
+ return map;
+ }
+ return NULL;
+}
+
+static void *agp_remap(unsigned long offset, unsigned long size,
+ drm_device_t * dev)
+{
+ unsigned long *phys_addr_map, i, num_pages =
+ PAGE_ALIGN(size) / PAGE_SIZE;
+ struct drm_agp_mem *agpmem;
+ struct page **page_map;
+ void *addr;
+
+ size = PAGE_ALIGN(size);
+
+#ifdef __alpha__
+ offset -= dev->hose->mem_space->start;
+#endif
+
+ for (agpmem = dev->agp->memory; agpmem; agpmem = agpmem->next)
+ if (agpmem->bound <= offset
+ && (agpmem->bound + (agpmem->pages << PAGE_SHIFT)) >=
+ (offset + size))
+ break;
+ if (!agpmem)
+ return NULL;
+
+ /*
+ * OK, we're mapping AGP space on a chipset/platform on which memory accesses by
+ * the CPU do not get remapped by the GART. We fix this by using the kernel's
+ * page-table instead (that's probably faster anyhow...).
+ */
+ /* note: use vmalloc() because num_pages could be large... */
+ page_map = vmalloc(num_pages * sizeof(struct page *));
+ if (!page_map)
+ return NULL;
+
+ phys_addr_map =
+ agpmem->memory->memory + (offset - agpmem->bound) / PAGE_SIZE;
+ for (i = 0; i < num_pages; ++i)
+ page_map[i] = pfn_to_page(phys_addr_map[i] >> PAGE_SHIFT);
+ addr = vmap(page_map, num_pages, VM_IOREMAP, PAGE_AGP);
+ vfree(page_map);
+
+ return addr;
+}
+
/** Wrapper around agp_allocate_memory() */
DRM_AGP_MEM *drm_alloc_agp(drm_device_t * dev, int pages, u32 type)
{
{
return drm_agp_unbind_memory(handle);
}
+
+#else /* __OS_HAS_AGP */
+
+static inline drm_map_t *drm_lookup_map(unsigned long offset,
+ unsigned long size, drm_device_t * dev)
+{
+ return NULL;
+}
+
+static inline void *agp_remap(unsigned long offset, unsigned long size,
+ drm_device_t * dev)
+{
+ return NULL;
+}
+
#endif /* agp */
+
+void *drm_ioremap(unsigned long offset, unsigned long size,
+ drm_device_t * dev)
+{
+ if (drm_core_has_AGP(dev) && dev->agp && dev->agp->cant_use_aperture) {
+ drm_map_t *map = drm_lookup_map(offset, size, dev);
+
+ if (map && map->type == _DRM_AGP)
+ return agp_remap(offset, size, dev);
+ }
+ return ioremap(offset, size);
+}
+EXPORT_SYMBOL(drm_ioremap);
+
+#if 0
+void *drm_ioremap_nocache(unsigned long offset,
+ unsigned long size, drm_device_t * dev)
+{
+ if (drm_core_has_AGP(dev) && dev->agp && dev->agp->cant_use_aperture) {
+ drm_map_t *map = drm_lookup_map(offset, size, dev);
+
+ if (map && map->type == _DRM_AGP)
+ return agp_remap(offset, size, dev);
+ }
+ return ioremap_nocache(offset, size);
+}
+#endif /* 0 */
+
+void drm_ioremapfree(void *pt, unsigned long size,
+ drm_device_t * dev)
+{
+ /*
+ * This is a bit ugly. It would be much cleaner if the DRM API would use separate
+ * routines for handling mappings in the AGP space. Hopefully this can be done in
+ * a future revision of the interface...
+ */
+ if (drm_core_has_AGP(dev) && dev->agp && dev->agp->cant_use_aperture
+ && ((unsigned long)pt >= VMALLOC_START
+ && (unsigned long)pt < VMALLOC_END)) {
+ unsigned long offset;
+ drm_map_t *map;
+
+ offset = drm_follow_page(pt) | ((unsigned long)pt & ~PAGE_MASK);
+ map = drm_lookup_map(offset, size, dev);
+ if (map && map->type == _DRM_AGP) {
+ vunmap(pt);
+ return;
+ }
+ }
+
+ iounmap(pt);
+}
+EXPORT_SYMBOL(drm_ioremapfree);
+
#endif /* debug_memory */
# endif
#endif
-/*
- * Find the drm_map that covers the range [offset, offset+size).
- */
-static inline drm_map_t *drm_lookup_map(unsigned long offset,
- unsigned long size, drm_device_t * dev)
-{
- struct list_head *list;
- drm_map_list_t *r_list;
- drm_map_t *map;
-
- list_for_each(list, &dev->maplist->head) {
- r_list = (drm_map_list_t *) list;
- map = r_list->map;
- if (!map)
- continue;
- if (map->offset <= offset
- && (offset + size) <= (map->offset + map->size))
- return map;
- }
- return NULL;
-}
-
-static inline void *agp_remap(unsigned long offset, unsigned long size,
- drm_device_t * dev)
-{
- unsigned long *phys_addr_map, i, num_pages =
- PAGE_ALIGN(size) / PAGE_SIZE;
- struct drm_agp_mem *agpmem;
- struct page **page_map;
- void *addr;
-
- size = PAGE_ALIGN(size);
-
-#ifdef __alpha__
- offset -= dev->hose->mem_space->start;
-#endif
-
- for (agpmem = dev->agp->memory; agpmem; agpmem = agpmem->next)
- if (agpmem->bound <= offset
- && (agpmem->bound + (agpmem->pages << PAGE_SHIFT)) >=
- (offset + size))
- break;
- if (!agpmem)
- return NULL;
-
- /*
- * OK, we're mapping AGP space on a chipset/platform on which memory accesses by
- * the CPU do not get remapped by the GART. We fix this by using the kernel's
- * page-table instead (that's probably faster anyhow...).
- */
- /* note: use vmalloc() because num_pages could be large... */
- page_map = vmalloc(num_pages * sizeof(struct page *));
- if (!page_map)
- return NULL;
-
- phys_addr_map =
- agpmem->memory->memory + (offset - agpmem->bound) / PAGE_SIZE;
- for (i = 0; i < num_pages; ++i)
- page_map[i] = pfn_to_page(phys_addr_map[i] >> PAGE_SHIFT);
- addr = vmap(page_map, num_pages, VM_IOREMAP, PAGE_AGP);
- vfree(page_map);
-
- return addr;
-}
-
static inline unsigned long drm_follow_page(void *vaddr)
{
pgd_t *pgd = pgd_offset_k((unsigned long)vaddr);
#else /* __OS_HAS_AGP */
-static inline drm_map_t *drm_lookup_map(unsigned long offset,
- unsigned long size, drm_device_t * dev)
-{
- return NULL;
-}
-
-static inline void *agp_remap(unsigned long offset, unsigned long size,
- drm_device_t * dev)
-{
- return NULL;
-}
-
static inline unsigned long drm_follow_page(void *vaddr)
{
return 0;
#endif
-static inline void *drm_ioremap(unsigned long offset, unsigned long size,
- drm_device_t * dev)
-{
- if (drm_core_has_AGP(dev) && dev->agp && dev->agp->cant_use_aperture) {
- drm_map_t *map = drm_lookup_map(offset, size, dev);
-
- if (map && map->type == _DRM_AGP)
- return agp_remap(offset, size, dev);
- }
- return ioremap(offset, size);
-}
-
-static inline void *drm_ioremap_nocache(unsigned long offset,
- unsigned long size, drm_device_t * dev)
-{
- if (drm_core_has_AGP(dev) && dev->agp && dev->agp->cant_use_aperture) {
- drm_map_t *map = drm_lookup_map(offset, size, dev);
-
- if (map && map->type == _DRM_AGP)
- return agp_remap(offset, size, dev);
- }
- return ioremap_nocache(offset, size);
-}
-
-static inline void drm_ioremapfree(void *pt, unsigned long size,
- drm_device_t * dev)
-{
- /*
- * This is a bit ugly. It would be much cleaner if the DRM API would use separate
- * routines for handling mappings in the AGP space. Hopefully this can be done in
- * a future revision of the interface...
- */
- if (drm_core_has_AGP(dev) && dev->agp && dev->agp->cant_use_aperture
- && ((unsigned long)pt >= VMALLOC_START
- && (unsigned long)pt < VMALLOC_END)) {
- unsigned long offset;
- drm_map_t *map;
-
- offset = drm_follow_page(pt) | ((unsigned long)pt & ~PAGE_MASK);
- map = drm_lookup_map(offset, size, dev);
- if (map && map->type == _DRM_AGP) {
- vunmap(pt);
- return;
- }
- }
+void *drm_ioremap(unsigned long offset, unsigned long size,
+ drm_device_t * dev);
- iounmap(pt);
-}
+void drm_ioremapfree(void *pt, unsigned long size,
+ drm_device_t * dev);
return pt;
}
+#if 0
void *drm_ioremap_nocache (unsigned long offset, unsigned long size,
drm_device_t * dev) {
void *pt;
spin_unlock(&drm_mem_lock);
return pt;
}
+#endif /* 0 */
void drm_ioremapfree (void *pt, unsigned long size, drm_device_t * dev) {
int alloc_count;
*/
#include <linux/pci.h>
+#include <linux/dma-mapping.h>
#include "drmP.h"
/**********************************************************************/
{
drm_via_private_t *dev_priv = (drm_via_private_t *) dev->dev_private;
unsigned int cur_irq_sequence;
- drm_via_irq_t *cur_irq = dev_priv->via_irqs;
+ drm_via_irq_t *cur_irq;
int ret = 0;
- maskarray_t *masks = dev_priv->irq_masks;
+ maskarray_t *masks;
int real_irq;
DRM_DEBUG("%s\n", __FUNCTION__);
__FUNCTION__, irq);
return DRM_ERR(EINVAL);
}
-
- cur_irq += real_irq;
+
+ masks = dev_priv->irq_masks;
+ cur_irq = dev_priv->via_irqs + real_irq;
if (masks[real_irq][2] && !force_sequence) {
DRM_WAIT_ON(ret, cur_irq->irq_queue, 3 * DRM_HZ,
{
drm_via_private_t *dev_priv = (drm_via_private_t *) dev->dev_private;
u32 status;
- drm_via_irq_t *cur_irq = dev_priv->via_irqs;
+ drm_via_irq_t *cur_irq;
int i;
DRM_DEBUG("driver_irq_preinstall: dev_priv: %p\n", dev_priv);
if (dev_priv) {
+ cur_irq = dev_priv->via_irqs;
dev_priv->irq_enable_mask = VIA_IRQ_VBLANK_ENABLE;
dev_priv->irq_pending_mask = VIA_IRQ_VBLANK_PENDING;
asmlinkage long sys_ftruncate(unsigned int fd, unsigned long length)
{
- return do_sys_ftruncate(fd, length, 1);
+ long ret = do_sys_ftruncate(fd, length, 1);
+ /* avoid REGPARM breakage on x86: */
+ prevent_tail_call(ret);
+ return ret;
}
/* LFS versions of truncate are only needed on 32 bit machines */
asmlinkage long sys_ftruncate64(unsigned int fd, loff_t length)
{
- return do_sys_ftruncate(fd, length, 0);
+ long ret = do_sys_ftruncate(fd, length, 0);
+ /* avoid REGPARM breakage on x86: */
+ prevent_tail_call(ret);
+ return ret;
}
#endif
asmlinkage long sys_open(const char __user *filename, int flags, int mode)
{
+ long ret;
+
if (force_o_largefile())
flags |= O_LARGEFILE;
- return do_sys_open(AT_FDCWD, filename, flags, mode);
+ ret = do_sys_open(AT_FDCWD, filename, flags, mode);
+ /* avoid REGPARM breakage on x86: */
+ prevent_tail_call(ret);
+ return ret;
}
EXPORT_SYMBOL_GPL(sys_open);
asmlinkage long sys_openat(int dfd, const char __user *filename, int flags,
int mode)
{
+ long ret;
+
if (force_o_largefile())
flags |= O_LARGEFILE;
- return do_sys_open(dfd, filename, flags, mode);
+ ret = do_sys_open(dfd, filename, flags, mode);
+ /* avoid REGPARM breakage on x86: */
+ prevent_tail_call(ret);
+ return ret;
}
EXPORT_SYMBOL_GPL(sys_openat);
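[Annotation: all four wrappers use the same idiom: funnel the return value through a local and fence it with prevent_tail_call(), so the compiler cannot emit a sibling call that would let the callee overwrite the caller's incoming argument area, which the x86 syscall entry stub still relies on. One plausible shape for the macro, shown only as a sketch; the real definition lives in the arch headers.]

	/* tie the value to a register in this frame; the empty asm acts as
	 * an optimization barrier that defeats the tail-call transformation
	 */
	#define prevent_tail_call(ret)	__asm__ ("" : "=r" (ret) : "0" (ret))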
*/
extern unsigned int virt_irq_to_real_map[NR_IRQS];
+/* The maximum virtual IRQ number that we support. This
+ * can be set by the platform and will be reduced by the
+ * value of __irq_offset_value. It defaults to and is
+ * capped by (NR_IRQS - 1).
+ */
+extern unsigned int virt_irq_max;
+
/* Create a mapping for a real_irq if it doesn't already exist.
* Return the virtual irq as a convenience.
*/
int preempt_count; /* 0 => preemptable,
<0 => BUG */
struct restart_block restart_block;
+ unsigned long local_flags; /* private flags for thread */
+
/* low level flags - has atomic operations done on it */
unsigned long flags ____cacheline_aligned_in_smp;
};
_TIF_NEED_RESCHED | _TIF_RESTORE_SIGMASK)
#define _TIF_PERSYSCALL_MASK (_TIF_RESTOREALL|_TIF_NOERROR)
+/* Bits in local_flags */
+/* Don't move TLF_NAPPING without adjusting the code in entry_32.S */
+#define TLF_NAPPING 0 /* idle thread enabled NAP mode */
+
+#define _TLF_NAPPING (1 << TLF_NAPPING)
+
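[Annotation: unlike the atomic bit operations required on ->flags, local_flags is only touched in the context of its own thread (the nap fixup runs in the exception path on the same CPU), so plain loads and stores suffice; a sketch with a hypothetical helper.]

	static inline void set_napping(struct thread_info *ti)
	{
		ti->local_flags |= _TLF_NAPPING;	/* no set_bit() needed */
	}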
#endif /* __KERNEL__ */
#endif /* _ASM_POWERPC_THREAD_INFO_H */
#include <asm/smp.h>
-#define NODEMAPSIZE 0xfff
+/* Should really switch to dynamic allocation at some point */
+#define NODEMAPSIZE 0x4fff
/* Simple perfect hash to map physical addresses to node numbers */
struct memnode {
__SYSCALL(__NR_splice, sys_splice)
#define __NR_tee 276
__SYSCALL(__NR_tee, sys_tee)
+#define __NR_sync_file_range 277
+__SYSCALL(__NR_sync_file_range, sys_sync_file_range)
-#define __NR_syscall_max __NR_tee
+#define __NR_syscall_max __NR_sync_file_range
#ifndef __NO_STUBS
SLOB is more space efficient but does not scale well and is
more susceptible to fragmentation.
-config DOUBLEFAULT
- default y
- bool "Enable doublefault exception handler" if EMBEDDED && X86_32
- help
- This option allows trapping of rare doublefault exceptions that
- would otherwise cause a system to silently reboot. Disabling this
- option saves about 4k and might cause you much additional grey
- hair.
-
endmenu # General setup
config TINY_SHMEM