Merge git://git.infradead.org/battery-2.6
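
The perfmon.c part of this merge is almost entirely a mechanical change to how pids are reported in diagnostic output: the visible hunks replace direct task->pid / current->pid reads in printk() and DPRINT() messages with the task_pid_nr() accessor, drop the stale <linux/smp_lock.h> include and the unused exit_pfm_fs() helper, move the perfmon work bit from TIF_NOTIFY_RESUME to TIF_PERFMON_WORK, and fix a handful of comment typos (validy, unreacheable, accesible, explicitely, thios).

Below is a minimal sketch of the before/after pattern the hunks apply; it is not part of the patch, and pfm_show_owner() is a hypothetical helper used only for illustration:

	#include <linux/kernel.h>
	#include <linux/sched.h>	/* struct task_struct, task_pid_nr() */

	static void pfm_show_owner(struct task_struct *task)
	{
		/* old style, as removed by this patch: raw field access */
		/* printk(KERN_INFO "perfmon: owner [%d]\n", task->pid); */

		/* new style: obtain the pid number through the accessor */
		printk(KERN_INFO "perfmon: owner [%d]\n", task_pid_nr(task));
	}
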
diff --git a/arch/ia64/kernel/perfmon.c b/arch/ia64/kernel/perfmon.c
index abc7ad0..59169bf 100644
--- a/arch/ia64/kernel/perfmon.c
+++ b/arch/ia64/kernel/perfmon.c
@@ -23,7 +23,6 @@
 #include <linux/kernel.h>
 #include <linux/sched.h>
 #include <linux/interrupt.h>
-#include <linux/smp_lock.h>
 #include <linux/proc_fs.h>
 #include <linux/seq_file.h>
 #include <linux/init.h>
  */
 #define PROTECT_CTX(c, f) \
        do {  \
-               DPRINT(("spinlock_irq_save ctx %p by [%d]\n", c, current->pid)); \
+               DPRINT(("spinlock_irq_save ctx %p by [%d]\n", c, task_pid_nr(current))); \
                spin_lock_irqsave(&(c)->ctx_lock, f); \
-               DPRINT(("spinlocked ctx %p  by [%d]\n", c, current->pid)); \
+               DPRINT(("spinlocked ctx %p  by [%d]\n", c, task_pid_nr(current))); \
        } while(0)
 
 #define UNPROTECT_CTX(c, f) \
        do { \
-               DPRINT(("spinlock_irq_restore ctx %p by [%d]\n", c, current->pid)); \
+               DPRINT(("spinlock_irq_restore ctx %p by [%d]\n", c, task_pid_nr(current))); \
                spin_unlock_irqrestore(&(c)->ctx_lock, f); \
        } while(0)
 
 #ifdef PFM_DEBUGGING
 #define DPRINT(a) \
        do { \
-               if (unlikely(pfm_sysctl.debug >0)) { printk("%s.%d: CPU%d [%d] ", __FUNCTION__, __LINE__, smp_processor_id(), current->pid); printk a; } \
+               if (unlikely(pfm_sysctl.debug >0)) { printk("%s.%d: CPU%d [%d] ", __FUNCTION__, __LINE__, smp_processor_id(), task_pid_nr(current)); printk a; } \
        } while (0)
 
 #define DPRINT_ovfl(a) \
        do { \
-               if (unlikely(pfm_sysctl.debug > 0 && pfm_sysctl.debug_ovfl >0)) { printk("%s.%d: CPU%d [%d] ", __FUNCTION__, __LINE__, smp_processor_id(), current->pid); printk a; } \
+               if (unlikely(pfm_sysctl.debug > 0 && pfm_sysctl.debug_ovfl >0)) { printk("%s.%d: CPU%d [%d] ", __FUNCTION__, __LINE__, smp_processor_id(), task_pid_nr(current)); printk a; } \
        } while (0)
 #endif
 
@@ -592,13 +591,13 @@ pfm_set_task_notify(struct task_struct *task)
        struct thread_info *info;
 
        info = (struct thread_info *) ((char *) task + IA64_TASK_SIZE);
-       set_bit(TIF_NOTIFY_RESUME, &info->flags);
+       set_bit(TIF_PERFMON_WORK, &info->flags);
 }
 
 static inline void
 pfm_clear_task_notify(void)
 {
-       clear_thread_flag(TIF_NOTIFY_RESUME);
+       clear_thread_flag(TIF_PERFMON_WORK);
 }
 
 static inline void
@@ -914,7 +913,7 @@ pfm_mask_monitoring(struct task_struct *task)
        unsigned long mask, val, ovfl_mask;
        int i;
 
-       DPRINT_ovfl(("masking monitoring for [%d]\n", task->pid));
+       DPRINT_ovfl(("masking monitoring for [%d]\n", task_pid_nr(task)));
 
        ovfl_mask = pmu_conf->ovfl_val;
        /*
@@ -993,12 +992,12 @@ pfm_restore_monitoring(struct task_struct *task)
        ovfl_mask = pmu_conf->ovfl_val;
 
        if (task != current) {
-               printk(KERN_ERR "perfmon.%d: invalid task[%d] current[%d]\n", __LINE__, task->pid, current->pid);
+               printk(KERN_ERR "perfmon.%d: invalid task[%d] current[%d]\n", __LINE__, task_pid_nr(task), task_pid_nr(current));
                return;
        }
        if (ctx->ctx_state != PFM_CTX_MASKED) {
                printk(KERN_ERR "perfmon.%d: task[%d] current[%d] invalid state=%d\n", __LINE__,
-                       task->pid, current->pid, ctx->ctx_state);
+                       task_pid_nr(task), task_pid_nr(current), ctx->ctx_state);
                return;
        }
        psr = pfm_get_psr();
@@ -1052,7 +1051,8 @@ pfm_restore_monitoring(struct task_struct *task)
                if ((mask & 0x1) == 0UL) continue;
                ctx->th_pmcs[i] = ctx->ctx_pmcs[i];
                ia64_set_pmc(i, ctx->th_pmcs[i]);
-               DPRINT(("[%d] pmc[%d]=0x%lx\n", task->pid, i, ctx->th_pmcs[i]));
+               DPRINT(("[%d] pmc[%d]=0x%lx\n",
+                                       task_pid_nr(task), i, ctx->th_pmcs[i]));
        }
        ia64_srlz_d();
 
@@ -1319,7 +1319,7 @@ pfm_reserve_session(struct task_struct *task, int is_syswide, unsigned int cpu)
 {
        unsigned long flags;
        /*
-        * validy checks on cpu_mask have been done upstream
+        * validity checks on cpu_mask have been done upstream
         */
        LOCK_PFS(flags);
 
@@ -1371,7 +1371,7 @@ pfm_reserve_session(struct task_struct *task, int is_syswide, unsigned int cpu)
 
 error_conflict:
        DPRINT(("system wide not possible, conflicting session [%d] on CPU%d\n",
-               pfm_sessions.pfs_sys_session[cpu]->pid,
+               task_pid_nr(pfm_sessions.pfs_sys_session[cpu]),
                cpu));
 abort:
        UNLOCK_PFS(flags);
@@ -1385,7 +1385,7 @@ pfm_unreserve_session(pfm_context_t *ctx, int is_syswide, unsigned int cpu)
 {
        unsigned long flags;
        /*
-        * validy checks on cpu_mask have been done upstream
+        * validity checks on cpu_mask have been done upstream
         */
        LOCK_PFS(flags);
 
@@ -1443,7 +1443,7 @@ pfm_remove_smpl_mapping(struct task_struct *task, void *vaddr, unsigned long siz
 
        /* sanity checks */
        if (task->mm == NULL || size == 0UL || vaddr == NULL) {
-               printk(KERN_ERR "perfmon: pfm_remove_smpl_mapping [%d] invalid context mm=%p\n", task->pid, task->mm);
+               printk(KERN_ERR "perfmon: pfm_remove_smpl_mapping [%d] invalid context mm=%p\n", task_pid_nr(task), task->mm);
                return -EINVAL;
        }
 
@@ -1460,7 +1460,7 @@ pfm_remove_smpl_mapping(struct task_struct *task, void *vaddr, unsigned long siz
 
        up_write(&task->mm->mmap_sem);
        if (r !=0) {
-               printk(KERN_ERR "perfmon: [%d] unable to unmap sampling buffer @%p size=%lu\n", task->pid, vaddr, size);
+               printk(KERN_ERR "perfmon: [%d] unable to unmap sampling buffer @%p size=%lu\n", task_pid_nr(task), vaddr, size);
        }
 
        DPRINT(("do_unmap(%p, %lu)=%d\n", vaddr, size, r));
@@ -1502,7 +1502,7 @@ pfm_free_smpl_buffer(pfm_context_t *ctx)
        return 0;
 
 invalid_free:
-       printk(KERN_ERR "perfmon: pfm_free_smpl_buffer [%d] no buffer\n", current->pid);
+       printk(KERN_ERR "perfmon: pfm_free_smpl_buffer [%d] no buffer\n", task_pid_nr(current));
        return -EINVAL;
 }
 #endif
@@ -1539,13 +1539,6 @@ init_pfm_fs(void)
        return err;
 }
 
-static void __exit
-exit_pfm_fs(void)
-{
-       unregister_filesystem(&pfm_fs_type);
-       mntput(pfmfs_mnt);
-}
-
 static ssize_t
 pfm_read(struct file *filp, char __user *buf, size_t size, loff_t *ppos)
 {
@@ -1555,13 +1548,13 @@ pfm_read(struct file *filp, char __user *buf, size_t size, loff_t *ppos)
        unsigned long flags;
        DECLARE_WAITQUEUE(wait, current);
        if (PFM_IS_FILE(filp) == 0) {
-               printk(KERN_ERR "perfmon: pfm_poll: bad magic [%d]\n", current->pid);
+               printk(KERN_ERR "perfmon: pfm_poll: bad magic [%d]\n", task_pid_nr(current));
                return -EINVAL;
        }
 
        ctx = (pfm_context_t *)filp->private_data;
        if (ctx == NULL) {
-               printk(KERN_ERR "perfmon: pfm_read: NULL ctx [%d]\n", current->pid);
+               printk(KERN_ERR "perfmon: pfm_read: NULL ctx [%d]\n", task_pid_nr(current));
                return -EINVAL;
        }
 
@@ -1615,7 +1608,7 @@ pfm_read(struct file *filp, char __user *buf, size_t size, loff_t *ppos)
 
                PROTECT_CTX(ctx, flags);
        }
-       DPRINT(("[%d] back to running ret=%ld\n", current->pid, ret));
+       DPRINT(("[%d] back to running ret=%ld\n", task_pid_nr(current), ret));
        set_current_state(TASK_RUNNING);
        remove_wait_queue(&ctx->ctx_msgq_wait, &wait);
 
@@ -1624,7 +1617,7 @@ pfm_read(struct file *filp, char __user *buf, size_t size, loff_t *ppos)
        ret = -EINVAL;
        msg = pfm_get_next_msg(ctx);
        if (msg == NULL) {
-               printk(KERN_ERR "perfmon: pfm_read no msg for ctx=%p [%d]\n", ctx, current->pid);
+               printk(KERN_ERR "perfmon: pfm_read no msg for ctx=%p [%d]\n", ctx, task_pid_nr(current));
                goto abort_locked;
        }
 
@@ -1655,13 +1648,13 @@ pfm_poll(struct file *filp, poll_table * wait)
        unsigned int mask = 0;
 
        if (PFM_IS_FILE(filp) == 0) {
-               printk(KERN_ERR "perfmon: pfm_poll: bad magic [%d]\n", current->pid);
+               printk(KERN_ERR "perfmon: pfm_poll: bad magic [%d]\n", task_pid_nr(current));
                return 0;
        }
 
        ctx = (pfm_context_t *)filp->private_data;
        if (ctx == NULL) {
-               printk(KERN_ERR "perfmon: pfm_poll: NULL ctx [%d]\n", current->pid);
+               printk(KERN_ERR "perfmon: pfm_poll: NULL ctx [%d]\n", task_pid_nr(current));
                return 0;
        }
 
@@ -1700,7 +1693,7 @@ pfm_do_fasync(int fd, struct file *filp, pfm_context_t *ctx, int on)
        ret = fasync_helper (fd, filp, on, &ctx->ctx_async_queue);
 
        DPRINT(("pfm_fasync called by [%d] on ctx_fd=%d on=%d async_queue=%p ret=%d\n",
-               current->pid,
+               task_pid_nr(current),
                fd,
                on,
                ctx->ctx_async_queue, ret));
@@ -1715,13 +1708,13 @@ pfm_fasync(int fd, struct file *filp, int on)
        int ret;
 
        if (PFM_IS_FILE(filp) == 0) {
-               printk(KERN_ERR "perfmon: pfm_fasync bad magic [%d]\n", current->pid);
+               printk(KERN_ERR "perfmon: pfm_fasync bad magic [%d]\n", task_pid_nr(current));
                return -EBADF;
        }
 
        ctx = (pfm_context_t *)filp->private_data;
        if (ctx == NULL) {
-               printk(KERN_ERR "perfmon: pfm_fasync NULL ctx [%d]\n", current->pid);
+               printk(KERN_ERR "perfmon: pfm_fasync NULL ctx [%d]\n", task_pid_nr(current));
                return -EBADF;
        }
        /*
@@ -1767,7 +1760,7 @@ pfm_syswide_force_stop(void *info)
        if (owner != ctx->ctx_task) {
                printk(KERN_ERR "perfmon: pfm_syswide_force_stop CPU%d unexpected owner [%d] instead of [%d]\n",
                        smp_processor_id(),
-                       owner->pid, ctx->ctx_task->pid);
+                       task_pid_nr(owner), task_pid_nr(ctx->ctx_task));
                return;
        }
        if (GET_PMU_CTX() != ctx) {
@@ -1777,7 +1770,7 @@ pfm_syswide_force_stop(void *info)
                return;
        }
 
-       DPRINT(("on CPU%d forcing system wide stop for [%d]\n", smp_processor_id(), ctx->ctx_task->pid));       
+       DPRINT(("on CPU%d forcing system wide stop for [%d]\n", smp_processor_id(), task_pid_nr(ctx->ctx_task)));
        /*
         * the context is already protected in pfm_close(), we simply
         * need to mask interrupts to avoid a PMU interrupt race on
@@ -1829,14 +1822,14 @@ pfm_flush(struct file *filp, fl_owner_t id)
 
        ctx = (pfm_context_t *)filp->private_data;
        if (ctx == NULL) {
-               printk(KERN_ERR "perfmon: pfm_flush: NULL ctx [%d]\n", current->pid);
+               printk(KERN_ERR "perfmon: pfm_flush: NULL ctx [%d]\n", task_pid_nr(current));
                return -EBADF;
        }
 
        /*
         * remove our file from the async queue, if we use this mode.
         * This can be done without the context being protected. We come
-        * here when the context has become unreacheable by other tasks.
+        * here when the context has become unreachable by other tasks.
         *
         * We may still have active monitoring at this point and we may
         * end up in pfm_overflow_handler(). However, fasync_helper()
@@ -1977,7 +1970,7 @@ pfm_close(struct inode *inode, struct file *filp)
        
        ctx = (pfm_context_t *)filp->private_data;
        if (ctx == NULL) {
-               printk(KERN_ERR "perfmon: pfm_close: NULL ctx [%d]\n", current->pid);
+               printk(KERN_ERR "perfmon: pfm_close: NULL ctx [%d]\n", task_pid_nr(current));
                return -EBADF;
        }
 
@@ -2074,7 +2067,7 @@ pfm_close(struct inode *inode, struct file *filp)
                 */
                ctx->ctx_state = PFM_CTX_ZOMBIE;
 
-               DPRINT(("zombie ctx for [%d]\n", task->pid));
+               DPRINT(("zombie ctx for [%d]\n", task_pid_nr(task)));
                /*
                 * cannot free the context on the spot. deferred until
                 * the task notices the ZOMBIE state
@@ -2133,7 +2126,7 @@ doit:
        filp->private_data = NULL;
 
        /*
-        * if we free on the spot, the context is now completely unreacheable
+        * if we free on the spot, the context is now completely unreachable
         * from the callers side. The monitored task side is also cut, so we
         * can freely cut.
         *
@@ -2480,7 +2473,7 @@ pfm_setup_buffer_fmt(struct task_struct *task, struct file *filp, pfm_context_t
        /* invoke and lock buffer format, if found */
        fmt = pfm_find_buffer_fmt(arg->ctx_smpl_buf_id);
        if (fmt == NULL) {
-               DPRINT(("[%d] cannot find buffer format\n", task->pid));
+               DPRINT(("[%d] cannot find buffer format\n", task_pid_nr(task)));
                return -EINVAL;
        }
 
@@ -2491,7 +2484,7 @@ pfm_setup_buffer_fmt(struct task_struct *task, struct file *filp, pfm_context_t
 
        ret = pfm_buf_fmt_validate(fmt, task, ctx_flags, cpu, fmt_arg);
 
-       DPRINT(("[%d] after validate(0x%x,%d,%p)=%d\n", task->pid, ctx_flags, cpu, fmt_arg, ret));
+       DPRINT(("[%d] after validate(0x%x,%d,%p)=%d\n", task_pid_nr(task), ctx_flags, cpu, fmt_arg, ret));
 
        if (ret) goto error;
 
@@ -2563,7 +2556,7 @@ pfm_reset_pmu_state(pfm_context_t *ctx)
        ctx->ctx_all_pmcs[0] = pmu_conf->impl_pmcs[0] & ~0x1;
 
        /*
-        * bitmask of all PMDs that are accesible to this context
+        * bitmask of all PMDs that are accessible to this context
         */
        ctx->ctx_all_pmds[0] = pmu_conf->impl_pmds[0];
 
@@ -2613,23 +2606,23 @@ pfm_task_incompatible(pfm_context_t *ctx, struct task_struct *task)
         * no kernel task or task not owner by caller
         */
        if (task->mm == NULL) {
-               DPRINT(("task [%d] has not memory context (kernel thread)\n", task->pid));
+               DPRINT(("task [%d] has not memory context (kernel thread)\n", task_pid_nr(task)));
                return -EPERM;
        }
        if (pfm_bad_permissions(task)) {
-               DPRINT(("no permission to attach to  [%d]\n", task->pid));
+               DPRINT(("no permission to attach to  [%d]\n", task_pid_nr(task)));
                return -EPERM;
        }
        /*
         * cannot block in self-monitoring mode
         */
        if (CTX_OVFL_NOBLOCK(ctx) == 0 && task == current) {
-               DPRINT(("cannot load a blocking context on self for [%d]\n", task->pid));
+               DPRINT(("cannot load a blocking context on self for [%d]\n", task_pid_nr(task)));
                return -EINVAL;
        }
 
        if (task->exit_state == EXIT_ZOMBIE) {
-               DPRINT(("cannot attach to  zombie task [%d]\n", task->pid));
+               DPRINT(("cannot attach to  zombie task [%d]\n", task_pid_nr(task)));
                return -EBUSY;
        }
 
@@ -2639,7 +2632,7 @@ pfm_task_incompatible(pfm_context_t *ctx, struct task_struct *task)
        if (task == current) return 0;
 
        if ((task->state != TASK_STOPPED) && (task->state != TASK_TRACED)) {
-               DPRINT(("cannot attach to non-stopped task [%d] state=%ld\n", task->pid, task->state));
+               DPRINT(("cannot attach to non-stopped task [%d] state=%ld\n", task_pid_nr(task), task->state));
                return -EBUSY;
        }
        /*
@@ -3396,7 +3389,7 @@ pfm_read_pmds(pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs)
                if (unlikely(!PMD_IS_IMPL(cnum))) goto error;
                /*
                 * we can only read the register that we use. That includes
-                * the one we explicitely initialize AND the one we want included
+                * the one we explicitly initialize AND the one we want included
                 * in the sampling buffer (smpl_regs).
                 *
                 * Having this restriction allows optimization in the ctxsw routine
@@ -3520,7 +3513,7 @@ pfm_use_debug_registers(struct task_struct *task)
 
        if (pmu_conf->use_rr_dbregs == 0) return 0;
 
-       DPRINT(("called for [%d]\n", task->pid));
+       DPRINT(("called for [%d]\n", task_pid_nr(task)));
 
        /*
         * do it only once
@@ -3551,7 +3544,7 @@ pfm_use_debug_registers(struct task_struct *task)
        DPRINT(("ptrace_use_dbregs=%u  sys_use_dbregs=%u by [%d] ret = %d\n",
                  pfm_sessions.pfs_ptrace_use_dbregs,
                  pfm_sessions.pfs_sys_use_dbregs,
-                 task->pid, ret));
+                 task_pid_nr(task), ret));
 
        UNLOCK_PFS(flags);
 
@@ -3576,7 +3569,7 @@ pfm_release_debug_registers(struct task_struct *task)
 
        LOCK_PFS(flags);
        if (pfm_sessions.pfs_ptrace_use_dbregs == 0) {
-               printk(KERN_ERR "perfmon: invalid release for [%d] ptrace_use_dbregs=0\n", task->pid);
+               printk(KERN_ERR "perfmon: invalid release for [%d] ptrace_use_dbregs=0\n", task_pid_nr(task));
                ret = -1;
        }  else {
                pfm_sessions.pfs_ptrace_use_dbregs--;
@@ -3628,7 +3621,7 @@ pfm_restart(pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs)
 
        /* sanity check */
        if (unlikely(task == NULL)) {
-               printk(KERN_ERR "perfmon: [%d] pfm_restart no task\n", current->pid);
+               printk(KERN_ERR "perfmon: [%d] pfm_restart no task\n", task_pid_nr(current));
                return -EINVAL;
        }
 
@@ -3637,7 +3630,7 @@ pfm_restart(pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs)
                fmt = ctx->ctx_buf_fmt;
 
                DPRINT(("restarting self %d ovfl=0x%lx\n",
-                       task->pid,
+                       task_pid_nr(task),
                        ctx->ctx_ovfl_regs[0]));
 
                if (CTX_HAS_SMPL(ctx)) {
@@ -3661,11 +3654,11 @@ pfm_restart(pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs)
                                pfm_reset_regs(ctx, ctx->ctx_ovfl_regs, PFM_PMD_LONG_RESET);
 
                        if (rst_ctrl.bits.mask_monitoring == 0) {
-                               DPRINT(("resuming monitoring for [%d]\n", task->pid));
+                               DPRINT(("resuming monitoring for [%d]\n", task_pid_nr(task)));
 
                                if (state == PFM_CTX_MASKED) pfm_restore_monitoring(task);
                        } else {
-                               DPRINT(("keeping monitoring stopped for [%d]\n", task->pid));
+                               DPRINT(("keeping monitoring stopped for [%d]\n", task_pid_nr(task)));
 
                                // cannot use pfm_stop_monitoring(task, regs);
                        }
@@ -3716,16 +3709,16 @@ pfm_restart(pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs)
         * if non-blocking, then we ensure that the task will go into
         * pfm_handle_work() before returning to user mode.
         *
-        * We cannot explicitely reset another task, it MUST always
+        * We cannot explicitly reset another task, it MUST always
         * be done by the task itself. This works for system wide because
         * the tool that is controlling the session is logically doing 
         * "self-monitoring".
         */
        if (CTX_OVFL_NOBLOCK(ctx) == 0 && state == PFM_CTX_MASKED) {
-               DPRINT(("unblocking [%d] \n", task->pid));
+               DPRINT(("unblocking [%d] \n", task_pid_nr(task)));
                complete(&ctx->ctx_restart_done);
        } else {
-               DPRINT(("[%d] armed exit trap\n", task->pid));
+               DPRINT(("[%d] armed exit trap\n", task_pid_nr(task)));
 
                ctx->ctx_fl_trap_reason = PFM_TRAP_REASON_RESET;
 
@@ -3813,7 +3806,7 @@ pfm_write_ibr_dbr(int mode, pfm_context_t *ctx, void *arg, int count, struct pt_
         * don't bother if we are loaded and task is being debugged
         */
        if (is_loaded && (thread->flags & IA64_THREAD_DBG_VALID) != 0) {
-               DPRINT(("debug registers already in use for [%d]\n", task->pid));
+               DPRINT(("debug registers already in use for [%d]\n", task_pid_nr(task)));
                return -EBUSY;
        }
 
@@ -3854,7 +3847,7 @@ pfm_write_ibr_dbr(int mode, pfm_context_t *ctx, void *arg, int count, struct pt_
         * is shared by all processes running on it
         */
        if (first_time && can_access_pmu) {
-               DPRINT(("[%d] clearing ibrs, dbrs\n", task->pid));
+               DPRINT(("[%d] clearing ibrs, dbrs\n", task_pid_nr(task)));
                for (i=0; i < pmu_conf->num_ibrs; i++) {
                        ia64_set_ibr(i, 0UL);
                        ia64_dv_serialize_instruction();
@@ -4043,7 +4036,7 @@ pfm_stop(pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs)
                return -EBUSY;
        }
        DPRINT(("task [%d] ctx_state=%d is_system=%d\n",
-               PFM_CTX_TASK(ctx)->pid,
+               task_pid_nr(PFM_CTX_TASK(ctx)),
                state,
                is_system));
        /*
@@ -4101,7 +4094,7 @@ pfm_stop(pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs)
                 * monitoring disabled in kernel at next reschedule
                 */
                ctx->ctx_saved_psr_up = 0;
-               DPRINT(("task=[%d]\n", task->pid));
+               DPRINT(("task=[%d]\n", task_pid_nr(task)));
        }
        return 0;
 }
@@ -4306,11 +4299,12 @@ pfm_context_load(pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs)
 
                if (is_system) {
                        if (pfm_sessions.pfs_ptrace_use_dbregs) {
-                               DPRINT(("cannot load [%d] dbregs in use\n", task->pid));
+                               DPRINT(("cannot load [%d] dbregs in use\n",
+                                                       task_pid_nr(task)));
                                ret = -EBUSY;
                        } else {
                                pfm_sessions.pfs_sys_use_dbregs++;
-                               DPRINT(("load [%d] increased sys_use_dbreg=%u\n", task->pid, pfm_sessions.pfs_sys_use_dbregs));
+                               DPRINT(("load [%d] increased sys_use_dbreg=%u\n", task_pid_nr(task), pfm_sessions.pfs_sys_use_dbregs));
                                set_dbregs = 1;
                        }
                }
@@ -4402,7 +4396,7 @@ pfm_context_load(pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs)
 
                        /* allow user level control */
                        ia64_psr(regs)->sp = 0;
-                       DPRINT(("clearing psr.sp for [%d]\n", task->pid));
+                       DPRINT(("clearing psr.sp for [%d]\n", task_pid_nr(task)));
 
                        SET_LAST_CPU(ctx, smp_processor_id());
                        INC_ACTIVATION();
@@ -4437,7 +4431,7 @@ pfm_context_load(pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs)
                 */
                SET_PMU_OWNER(task, ctx);
 
-               DPRINT(("context loaded on PMU for [%d]\n", task->pid));
+               DPRINT(("context loaded on PMU for [%d]\n", task_pid_nr(task)));
        } else {
                /*
                 * when not current, task MUST be stopped, so this is safe
@@ -4501,7 +4495,7 @@ pfm_context_unload(pfm_context_t *ctx, void *arg, int count, struct pt_regs *reg
        int prev_state, is_system;
        int ret;
 
-       DPRINT(("ctx_state=%d task [%d]\n", ctx->ctx_state, task ? task->pid : -1));
+       DPRINT(("ctx_state=%d task [%d]\n", ctx->ctx_state, task ? task_pid_nr(task) : -1));
 
        prev_state = ctx->ctx_state;
        is_system  = ctx->ctx_fl_system;
@@ -4576,7 +4570,7 @@ pfm_context_unload(pfm_context_t *ctx, void *arg, int count, struct pt_regs *reg
                 */
                ia64_psr(regs)->sp = 1;
 
-               DPRINT(("setting psr.sp for [%d]\n", task->pid));
+               DPRINT(("setting psr.sp for [%d]\n", task_pid_nr(task)));
        }
        /*
         * save PMDs to context
@@ -4616,7 +4610,7 @@ pfm_context_unload(pfm_context_t *ctx, void *arg, int count, struct pt_regs *reg
        ctx->ctx_fl_can_restart  = 0;
        ctx->ctx_fl_going_zombie = 0;
 
-       DPRINT(("disconnected [%d] from context\n", task->pid));
+       DPRINT(("disconnected [%d] from context\n", task_pid_nr(task)));
 
        return 0;
 }
@@ -4639,22 +4633,22 @@ pfm_exit_thread(struct task_struct *task)
 
        PROTECT_CTX(ctx, flags);
 
-       DPRINT(("state=%d task [%d]\n", ctx->ctx_state, task->pid));
+       DPRINT(("state=%d task [%d]\n", ctx->ctx_state, task_pid_nr(task)));
 
        state = ctx->ctx_state;
        switch(state) {
                case PFM_CTX_UNLOADED:
                        /*
-                        * only comes to thios function if pfm_context is not NULL, i.e., cannot
+                        * only comes to this function if pfm_context is not NULL, i.e., cannot
                         * be in unloaded state
                         */
-                       printk(KERN_ERR "perfmon: pfm_exit_thread [%d] ctx unloaded\n", task->pid);
+                       printk(KERN_ERR "perfmon: pfm_exit_thread [%d] ctx unloaded\n", task_pid_nr(task));
                        break;
                case PFM_CTX_LOADED:
                case PFM_CTX_MASKED:
                        ret = pfm_context_unload(ctx, NULL, 0, regs);
                        if (ret) {
-                               printk(KERN_ERR "perfmon: pfm_exit_thread [%d] state=%d unload failed %d\n", task->pid, state, ret);
+                               printk(KERN_ERR "perfmon: pfm_exit_thread [%d] state=%d unload failed %d\n", task_pid_nr(task), state, ret);
                        }
                        DPRINT(("ctx unloaded for current state was %d\n", state));
 
@@ -4663,12 +4657,12 @@ pfm_exit_thread(struct task_struct *task)
                case PFM_CTX_ZOMBIE:
                        ret = pfm_context_unload(ctx, NULL, 0, regs);
                        if (ret) {
-                               printk(KERN_ERR "perfmon: pfm_exit_thread [%d] state=%d unload failed %d\n", task->pid, state, ret);
+                               printk(KERN_ERR "perfmon: pfm_exit_thread [%d] state=%d unload failed %d\n", task_pid_nr(task), state, ret);
                        }
                        free_ok = 1;
                        break;
                default:
-                       printk(KERN_ERR "perfmon: pfm_exit_thread [%d] unexpected state=%d\n", task->pid, state);
+                       printk(KERN_ERR "perfmon: pfm_exit_thread [%d] unexpected state=%d\n", task_pid_nr(task), state);
                        break;
        }
        UNPROTECT_CTX(ctx, flags);
@@ -4752,7 +4746,7 @@ recheck:
        DPRINT(("context %d state=%d [%d] task_state=%ld must_stop=%d\n",
                ctx->ctx_fd,
                state,
-               task->pid,
+               task_pid_nr(task),
                task->state, PFM_CMD_STOPPED(cmd)));
 
        /*
@@ -4799,7 +4793,7 @@ recheck:
         */
        if (PFM_CMD_STOPPED(cmd)) {
                if ((task->state != TASK_STOPPED) && (task->state != TASK_TRACED)) {
-                       DPRINT(("[%d] task not in stopped state\n", task->pid));
+                       DPRINT(("[%d] task not in stopped state\n", task_pid_nr(task)));
                        return -EBUSY;
                }
                /*
@@ -4892,7 +4886,7 @@ restart_args:
         * limit abuse to min page size
         */
        if (unlikely(sz > PFM_MAX_ARGSIZE)) {
-               printk(KERN_ERR "perfmon: [%d] argument too big %lu\n", current->pid, sz);
+               printk(KERN_ERR "perfmon: [%d] argument too big %lu\n", task_pid_nr(current), sz);
                return -E2BIG;
        }
 
@@ -5039,11 +5033,11 @@ pfm_context_force_terminate(pfm_context_t *ctx, struct pt_regs *regs)
 {
        int ret;
 
-       DPRINT(("entering for [%d]\n", current->pid));
+       DPRINT(("entering for [%d]\n", task_pid_nr(current)));
 
        ret = pfm_context_unload(ctx, NULL, 0, regs);
        if (ret) {
-               printk(KERN_ERR "pfm_context_force_terminate: [%d] unloaded failed with %d\n", current->pid, ret);
+               printk(KERN_ERR "pfm_context_force_terminate: [%d] unloaded failed with %d\n", task_pid_nr(current), ret);
        }
 
        /*
@@ -5080,7 +5074,7 @@ pfm_handle_work(void)
 
        ctx = PFM_GET_CTX(current);
        if (ctx == NULL) {
-               printk(KERN_ERR "perfmon: [%d] has no PFM context\n", current->pid);
+               printk(KERN_ERR "perfmon: [%d] has no PFM context\n", task_pid_nr(current));
                return;
        }
 
@@ -5248,7 +5242,7 @@ pfm_end_notify_user(pfm_context_t *ctx)
 
 /*
  * main overflow processing routine.
- * it can be called from the interrupt path or explicitely during the context switch code
+ * it can be called from the interrupt path or explicitly during the context switch code
  */
 static void
 pfm_overflow_handler(struct task_struct *task, pfm_context_t *ctx, u64 pmc0, struct pt_regs *regs)
@@ -5277,7 +5271,7 @@ pfm_overflow_handler(struct task_struct *task, pfm_context_t *ctx, u64 pmc0, str
        DPRINT_ovfl(("pmc0=0x%lx pid=%d iip=0x%lx, %s "
                     "used_pmds=0x%lx\n",
                        pmc0,
-                       task ? task->pid: -1,
+                       task ? task_pid_nr(task): -1,
                        (regs ? regs->cr_iip : 0),
                        CTX_OVFL_NOBLOCK(ctx) ? "nonblocking" : "blocking",
                        ctx->ctx_used_pmds[0]));
@@ -5466,7 +5460,7 @@ pfm_overflow_handler(struct task_struct *task, pfm_context_t *ctx, u64 pmc0, str
        }
 
        DPRINT_ovfl(("owner [%d] pending=%ld reason=%u ovfl_pmds=0x%lx ovfl_notify=0x%lx masked=%d\n",
-                       GET_PMU_OWNER() ? GET_PMU_OWNER()->pid : -1,
+                       GET_PMU_OWNER() ? task_pid_nr(GET_PMU_OWNER()) : -1,
                        PFM_GET_WORK_PENDING(task),
                        ctx->ctx_fl_trap_reason,
                        ovfl_pmds,
@@ -5491,7 +5485,7 @@ pfm_overflow_handler(struct task_struct *task, pfm_context_t *ctx, u64 pmc0, str
 sanity_check:
        printk(KERN_ERR "perfmon: CPU%d overflow handler [%d] pmc0=0x%lx\n",
                        smp_processor_id(),
-                       task ? task->pid : -1,
+                       task ? task_pid_nr(task) : -1,
                        pmc0);
        return;
 
@@ -5524,7 +5518,7 @@ stop_monitoring:
         *
         * Overall pretty hairy stuff....
         */
-       DPRINT(("ctx is zombie for [%d], converted to spurious\n", task ? task->pid: -1));
+       DPRINT(("ctx is zombie for [%d], converted to spurious\n", task ? task_pid_nr(task): -1));
        pfm_clear_psr_up();
        ia64_psr(regs)->up = 0;
        ia64_psr(regs)->sp = 1;
@@ -5585,13 +5579,13 @@ pfm_do_interrupt_handler(int irq, void *arg, struct pt_regs *regs)
 
 report_spurious1:
        printk(KERN_INFO "perfmon: spurious overflow interrupt on CPU%d: process %d has no PFM context\n",
-               this_cpu, task->pid);
+               this_cpu, task_pid_nr(task));
        pfm_unfreeze_pmu();
        return -1;
 report_spurious2:
        printk(KERN_INFO "perfmon: spurious overflow interrupt on CPU%d: process %d, invalid flag\n", 
                this_cpu, 
-               task->pid);
+               task_pid_nr(task));
        pfm_unfreeze_pmu();
        return -1;
 }
@@ -5878,7 +5872,8 @@ pfm_force_cleanup(pfm_context_t *ctx, struct pt_regs *regs)
        ia64_psr(regs)->sp = 1;
 
        if (GET_PMU_OWNER() == task) {
-               DPRINT(("cleared ownership for [%d]\n", ctx->ctx_task->pid));
+               DPRINT(("cleared ownership for [%d]\n",
+                                       task_pid_nr(ctx->ctx_task)));
                SET_PMU_OWNER(NULL, NULL);
        }
 
@@ -5890,7 +5885,7 @@ pfm_force_cleanup(pfm_context_t *ctx, struct pt_regs *regs)
        task->thread.pfm_context  = NULL;
        task->thread.flags       &= ~IA64_THREAD_PM_VALID;
 
-       DPRINT(("force cleanup for [%d]\n",  task->pid));
+       DPRINT(("force cleanup for [%d]\n",  task_pid_nr(task)));
 }
 
 
@@ -6434,7 +6429,7 @@ pfm_flush_pmds(struct task_struct *task, pfm_context_t *ctx)
 
                if (PMD_IS_COUNTING(i)) {
                        DPRINT(("[%d] pmd[%d] ctx_pmd=0x%lx hw_pmd=0x%lx\n",
-                               task->pid,
+                               task_pid_nr(task),
                                i,
                                ctx->ctx_pmds[i].val,
                                val & ovfl_val));
@@ -6456,11 +6451,11 @@ pfm_flush_pmds(struct task_struct *task, pfm_context_t *ctx)
                         */
                        if (pmc0 & (1UL << i)) {
                                val += 1 + ovfl_val;
-                               DPRINT(("[%d] pmd[%d] overflowed\n", task->pid, i));
+                               DPRINT(("[%d] pmd[%d] overflowed\n", task_pid_nr(task), i));
                        }
                }
 
-               DPRINT(("[%d] ctx_pmd[%d]=0x%lx  pmd_val=0x%lx\n", task->pid, i, val, pmd_val));
+               DPRINT(("[%d] ctx_pmd[%d]=0x%lx  pmd_val=0x%lx\n", task_pid_nr(task), i, val, pmd_val));
 
                if (is_self) ctx->th_pmds[i] = pmd_val;
 
@@ -6801,14 +6796,14 @@ dump_pmu_state(const char *from)
        printk("CPU%d from %s() current [%d] iip=0x%lx %s\n", 
                this_cpu, 
                from, 
-               current->pid, 
+               task_pid_nr(current),
                regs->cr_iip,
                current->comm);
 
        task = GET_PMU_OWNER();
        ctx  = GET_PMU_CTX();
 
-       printk("->CPU%d owner [%d] ctx=%p\n", this_cpu, task ? task->pid : -1, ctx);
+       printk("->CPU%d owner [%d] ctx=%p\n", this_cpu, task ? task_pid_nr(task) : -1, ctx);
 
        psr = pfm_get_psr();
 
@@ -6856,7 +6851,7 @@ pfm_inherit(struct task_struct *task, struct pt_regs *regs)
 {
        struct thread_struct *thread;
 
-       DPRINT(("perfmon: pfm_inherit clearing state for [%d]\n", task->pid));
+       DPRINT(("perfmon: pfm_inherit clearing state for [%d]\n", task_pid_nr(task)));
 
        thread = &task->thread;