Merge git://git.kernel.org/pub/scm/linux/kernel/git/dhowells/linux-2.6-mn10300
author    Linus Torvalds <torvalds@linux-foundation.org>
          Thu, 28 Oct 2010 01:53:26 +0000 (18:53 -0700)
committer Linus Torvalds <torvalds@linux-foundation.org>
          Thu, 28 Oct 2010 01:53:26 +0000 (18:53 -0700)
* git://git.kernel.org/pub/scm/linux/kernel/git/dhowells/linux-2.6-mn10300: (44 commits)
  MN10300: Save frame pointer in thread_info struct rather than global var
  MN10300: Change "Matsushita" to "Panasonic".
  MN10300: Create a defconfig for the ASB2364 board
  MN10300: Update the ASB2303 defconfig
  MN10300: ASB2364: Add support for SMSC911X and SMC911X
  MN10300: ASB2364: Handle the IRQ multiplexer in the FPGA
  MN10300: Generic time support
  MN10300: Specify an ELF HWCAP flag for MN10300 Atomic Operations Unit support
  MN10300: Map userspace atomic op regs as a vmalloc page
  MN10300: Add Panasonic AM34 subarch and implement SMP
  MN10300: Delete idle_timestamp from irq_cpustat_t
  MN10300: Make various interrupt priority settings configurable
  MN10300: Optimise do_csum()
  MN10300: Implement atomic ops using atomic ops unit
  MN10300: Make the FPU operate in non-lazy mode under SMP
  MN10300: SMP TLB flushing
  MN10300: Use the [ID]PTEL2 registers rather than [ID]PTEL for TLB control
  MN10300: Make the use of PIDR to mark TLB entries controllable
  MN10300: Rename __flush_tlb*() to local_flush_tlb*()
  MN10300: AM34 erratum requires MMUCTR read and write on exception entry
  ...

393 files changed:
Documentation/accounting/getdelays.c
Documentation/cgroups/cgroups.txt
Documentation/feature-removal-schedule.txt
Documentation/filesystems/proc.txt
Documentation/networking/phy.txt
Documentation/sysctl/vm.txt
arch/alpha/kernel/pci_iommu.c
arch/alpha/kernel/ptrace.c
arch/arm/kernel/ptrace.c
arch/arm/mach-tegra/timer.c
arch/arm/mm/highmem.c
arch/arm/plat-nomadik/include/plat/ste_dma40.h
arch/avr32/kernel/ptrace.c
arch/blackfin/kernel/ptrace.c
arch/cris/arch-v10/kernel/ptrace.c
arch/cris/arch-v32/kernel/ptrace.c
arch/frv/kernel/ptrace.c
arch/frv/mm/highmem.c
arch/h8300/kernel/ptrace.c
arch/ia64/include/asm/cputime.h
arch/ia64/kernel/ptrace.c
arch/m32r/kernel/ptrace.c
arch/m68k/kernel/ptrace.c
arch/m68knommu/kernel/ptrace.c
arch/microblaze/kernel/ptrace.c
arch/mips/include/asm/pci/bridge.h
arch/mips/kernel/ptrace.c
arch/mips/mm/highmem.c
arch/mn10300/include/asm/highmem.h
arch/mn10300/kernel/ptrace.c
arch/parisc/kernel/ptrace.c
arch/powerpc/include/asm/cputime.h
arch/powerpc/kernel/ptrace.c
arch/powerpc/mm/highmem.c
arch/powerpc/sysdev/fsl_rio.c
arch/s390/include/asm/cputime.h
arch/s390/kernel/ptrace.c
arch/score/kernel/ptrace.c
arch/sh/kernel/ptrace_32.c
arch/sh/kernel/ptrace_64.c
arch/sparc/include/asm/io_32.h
arch/sparc/include/asm/io_64.h
arch/sparc/include/asm/pci_64.h
arch/sparc/kernel/ptrace_32.c
arch/sparc/kernel/ptrace_64.c
arch/sparc/mm/highmem.c
arch/tile/Kconfig
arch/tile/kernel/ptrace.c
arch/tile/kernel/setup.c
arch/tile/mm/highmem.c
arch/um/kernel/ptrace.c
arch/um/sys-i386/ptrace.c
arch/um/sys-x86_64/ptrace.c
arch/x86/Kconfig
arch/x86/include/asm/irq.h
arch/x86/include/asm/msr-index.h
arch/x86/include/asm/perf_event.h
arch/x86/include/asm/smp.h
arch/x86/kernel/cpu/perf_event.c
arch/x86/kernel/cpu/perf_event_intel_ds.c
arch/x86/kernel/dumpstack_32.c
arch/x86/kernel/dumpstack_64.c
arch/x86/kernel/irq_32.c
arch/x86/kernel/ptrace.c
arch/x86/kernel/reboot.c
arch/x86/kernel/smp.c
arch/x86/kernel/smpboot.c
arch/x86/mm/highmem_32.c
arch/x86/mm/init_64.c
arch/x86/mm/iomap_32.c
arch/x86/oprofile/nmi_int.c
arch/x86/oprofile/op_model_amd.c
arch/x86/xen/enlighten.c
arch/x86/xen/smp.c
arch/xtensa/kernel/ptrace.c
drivers/atm/eni.c
drivers/char/applicom.c
drivers/char/hvc_console.c
drivers/char/ip2/Makefile
drivers/char/ipmi/Makefile
drivers/char/ipmi/ipmi_devintf.c
drivers/char/ipmi/ipmi_msghandler.c
drivers/char/ipmi/ipmi_si_intf.c
drivers/char/mmtimer.c
drivers/char/mwave/Makefile
drivers/char/mxser.c
drivers/char/pcmcia/ipwireless/Makefile
drivers/char/ppdev.c
drivers/char/ramoops.c
drivers/char/rio/Makefile
drivers/char/rocket.c
drivers/char/synclink_gt.c
drivers/char/vt_ioctl.c
drivers/connector/cn_queue.c
drivers/connector/connector.c
drivers/dma/pch_dma.c
drivers/firmware/dmi_scan.c
drivers/gpio/74x164.c [new file with mode: 0644]
drivers/gpio/Kconfig
drivers/gpio/Makefile
drivers/gpio/adp5588-gpio.c
drivers/gpio/basic_mmio_gpio.c [new file with mode: 0644]
drivers/gpio/langwell_gpio.c
drivers/gpio/pch_gpio.c [new file with mode: 0644]
drivers/gpio/timbgpio.c
drivers/i2c/busses/scx200_acb.c
drivers/isdn/hardware/mISDN/mISDNinfineon.c
drivers/isdn/hisax/l3_1tr6.c
drivers/media/IR/lirc_dev.c
drivers/net/atl1c/atl1c.h
drivers/net/atl1c/atl1c_main.c
drivers/net/atlx/atl1.c
drivers/net/atlx/atl1.h
drivers/net/atlx/atlx.c
drivers/net/benet/be_cmds.c
drivers/net/benet/be_cmds.h
drivers/net/benet/be_main.c
drivers/net/bnx2x/bnx2x.h
drivers/net/bnx2x/bnx2x_cmn.c
drivers/net/bnx2x/bnx2x_cmn.h
drivers/net/bnx2x/bnx2x_init_ops.h
drivers/net/bnx2x/bnx2x_link.c
drivers/net/bnx2x/bnx2x_link.h
drivers/net/bnx2x/bnx2x_main.c
drivers/net/bonding/bond_main.c
drivers/net/caif/Kconfig
drivers/net/caif/Makefile
drivers/net/caif/caif_shm_u5500.c [new file with mode: 0644]
drivers/net/caif/caif_shmcore.c [new file with mode: 0644]
drivers/net/can/Kconfig
drivers/net/can/Makefile
drivers/net/can/at91_can.c
drivers/net/can/flexcan.c
drivers/net/can/mcp251x.c
drivers/net/can/pch_can.c [new file with mode: 0644]
drivers/net/can/sja1000/Kconfig
drivers/net/can/sja1000/Makefile
drivers/net/can/sja1000/tscan1.c [new file with mode: 0644]
drivers/net/cxgb3/cxgb3_main.c
drivers/net/cxgb4/cxgb4.h
drivers/net/cxgb4/cxgb4_main.c
drivers/net/cxgb4/sge.c
drivers/net/e1000/e1000_main.c
drivers/net/ehea/ehea.h
drivers/net/ehea/ehea_main.c
drivers/net/gianfar.c
drivers/net/jme.c
drivers/net/macb.c
drivers/net/mlx4/icm.c
drivers/net/mlx4/icm.h
drivers/net/mlx4/port.c
drivers/net/phy/phy.c
drivers/net/phy/phy_device.c
drivers/net/qlcnic/qlcnic.h
drivers/net/qlcnic/qlcnic_ethtool.c
drivers/net/qlcnic/qlcnic_main.c
drivers/net/qlge/qlge.h
drivers/net/qlge/qlge_main.c
drivers/net/qlge/qlge_mpi.c
drivers/net/sb1000.c
drivers/net/sgiseeq.c
drivers/net/slhc.c
drivers/net/tg3.c
drivers/net/tokenring/tms380tr.c
drivers/net/typhoon.c
drivers/net/vmxnet3/upt1_defs.h
drivers/net/vmxnet3/vmxnet3_defs.h
drivers/net/vmxnet3/vmxnet3_drv.c
drivers/net/vmxnet3/vmxnet3_ethtool.c
drivers/net/vmxnet3/vmxnet3_int.h
drivers/net/vxge/vxge-config.c
drivers/net/vxge/vxge-config.h
drivers/net/vxge/vxge-ethtool.c
drivers/net/vxge/vxge-main.c
drivers/net/vxge/vxge-main.h
drivers/net/vxge/vxge-traffic.c
drivers/net/vxge/vxge-traffic.h
drivers/net/wireless/ath/ath5k/base.c
drivers/net/wireless/ath/ath9k/ar9003_2p2_initvals.h
drivers/net/wireless/ath/ath9k/ar9003_paprd.c
drivers/net/wireless/ath/ath9k/beacon.c
drivers/net/wireless/ath/ath9k/init.c
drivers/net/wireless/ath/ath9k/main.c
drivers/net/wireless/ath/ath9k/xmit.c
drivers/net/wireless/ath/carl9170/cmd.h
drivers/net/wireless/ath/carl9170/main.c
drivers/net/wireless/ath/carl9170/usb.c
drivers/net/wireless/b43/phy_n.c
drivers/net/wireless/iwlwifi/iwl-agn-tx.c
drivers/net/wireless/wl1251/Makefile
drivers/pci/proc.c
drivers/platform/x86/intel_pmic_gpio.c
drivers/rapidio/rio-driver.c
drivers/rapidio/rio-scan.c
drivers/rapidio/rio-sysfs.c
drivers/rapidio/rio.c
drivers/rapidio/rio.h
drivers/rapidio/switches/Kconfig
drivers/rapidio/switches/Makefile
drivers/rapidio/switches/idt_gen2.c [new file with mode: 0644]
drivers/rapidio/switches/idtcps.c
drivers/rapidio/switches/tsi568.c
drivers/rapidio/switches/tsi57x.c
drivers/rtc/Kconfig
drivers/rtc/Makefile
drivers/rtc/class.c
drivers/rtc/rtc-bfin.c
drivers/rtc/rtc-ds3232.c
drivers/rtc/rtc-jz4740.c
drivers/rtc/rtc-lpc32xx.c [new file with mode: 0644]
drivers/rtc/rtc-omap.c
drivers/rtc/rtc-s3c.c
drivers/video/fbmem.c
drivers/video/gbefb.c
drivers/video/matrox/matroxfb_DAC1064.c
drivers/video/matrox/matroxfb_maven.c
drivers/video/omap/blizzard.c
drivers/video/savage/savagefb-i2c.c
drivers/w1/w1.c
fs/Kconfig
fs/Kconfig.binfmt
fs/eventpoll.c
fs/exec.c
fs/fcntl.c
fs/fuse/dev.c
fs/isofs/inode.c
fs/lockd/svc.c
fs/lockd/svclock.c
fs/lockd/svcsubs.c
fs/locks.c
fs/nfs/Kconfig
fs/nfsd/Kconfig
fs/nfsd/nfs4state.c
fs/proc/base.c
fs/proc/softirqs.c
fs/proc/stat.c
fs/proc/task_mmu.c
fs/select.c
include/asm-generic/cputime.h
include/asm-generic/gpio.h
include/linux/basic_mmio_gpio.h [new file with mode: 0644]
include/linux/cgroup.h
include/linux/connector.h
include/linux/fb.h
include/linux/fs.h
include/linux/highmem.h
include/linux/i2c/adp5588.h
include/linux/init_task.h
include/linux/interrupt.h
include/linux/kernel_stat.h
include/linux/kfifo.h
include/linux/netdevice.h
include/linux/percpu-defs.h
include/linux/phy.h
include/linux/poll.h
include/linux/ptrace.h
include/linux/ramoops.h [new file with mode: 0644]
include/linux/ring_buffer.h
include/linux/rio.h
include/linux/rio_ids.h
include/linux/rio_regs.h
include/linux/sched.h
include/linux/spi/74x164.h [new file with mode: 0644]
include/linux/synclink.h
include/linux/syscalls.h
include/linux/tracehook.h
include/linux/virtio_9p.h
include/net/caif/caif_shm.h [new file with mode: 0644]
include/net/dst.h
include/net/fib_rules.h
include/net/garp.h
include/net/inetpeer.h
include/net/ip.h
include/net/ip6_tunnel.h
include/net/ipip.h
include/net/net_namespace.h
include/net/protocol.h
include/net/sock.h
include/net/xfrm.h
include/trace/events/irq.h
init/Kconfig
ipc/compat.c
ipc/compat_mq.c
ipc/shm.c
kernel/cgroup.c
kernel/cgroup_freezer.c
kernel/cred.c
kernel/exit.c
kernel/fork.c
kernel/irq/irqdesc.c
kernel/kprobes.c
kernel/module.c
kernel/ns_cgroup.c
kernel/perf_event.c
kernel/ptrace.c
kernel/resource.c
kernel/signal.c
kernel/softirq.c
kernel/taskstats.c
kernel/trace/ring_buffer.c
kernel/trace/trace.c
kernel/trace/trace_kprobe.c
kernel/tsacct.c
mm/highmem.c
mm/memcontrol.c
mm/swap.c
net/802/garp.c
net/802/stp.c
net/8021q/vlan.c
net/core/dev.c
net/core/fib_rules.c
net/core/filter.c
net/core/net-sysfs.c
net/core/net_namespace.c
net/core/pktgen.c
net/core/sock.c
net/core/sysctl_net_core.c
net/ipv4/fib_hash.c
net/ipv4/gre.c
net/ipv4/inetpeer.c
net/ipv4/ip_gre.c
net/ipv4/ip_sockglue.c
net/ipv4/ipip.c
net/ipv4/protocol.c
net/ipv4/route.c
net/ipv4/tunnel4.c
net/ipv4/udp.c
net/ipv6/addrconf.c
net/ipv6/ip6_tunnel.c
net/ipv6/ipv6_sockglue.c
net/ipv6/netfilter/Kconfig
net/ipv6/netfilter/Makefile
net/ipv6/netfilter/nf_conntrack_reasm.c
net/ipv6/protocol.c
net/ipv6/raw.c
net/ipv6/sit.c
net/ipv6/tunnel6.c
net/ipv6/udp.c
net/l2tp/l2tp_core.c
net/l2tp/l2tp_core.h
net/l2tp/l2tp_ip.c
net/mac80211/ibss.c
net/mac80211/main.c
net/mac80211/rate.c
net/netfilter/Kconfig
net/netfilter/xt_TPROXY.c
net/netfilter/xt_socket.c
net/netlink/af_netlink.c
net/wireless/reg.c
sound/oss/sb_ess.c
sound/pci/hda/patch_sigmatel.c
sound/soc/codecs/ad73311.c
sound/soc/codecs/max98088.c
sound/soc/codecs/wm9090.c
sound/soc/fsl/pcm030-audio-fabric.c
sound/usb/card.h
sound/usb/pcm.c
sound/usb/proc.c
sound/usb/urb.c
tools/perf/Documentation/perf-list.txt
tools/perf/Documentation/perf-probe.txt
tools/perf/Documentation/perf-record.txt
tools/perf/builtin-probe.c
tools/perf/builtin-record.c
tools/perf/builtin-trace.c
tools/perf/scripts/perl/bin/failed-syscalls-report
tools/perf/scripts/perl/bin/rw-by-file-report
tools/perf/scripts/perl/bin/rw-by-pid-report
tools/perf/scripts/perl/bin/rwtop-report
tools/perf/scripts/perl/bin/wakeup-latency-report
tools/perf/scripts/perl/bin/workqueue-stats-report
tools/perf/scripts/python/Perf-Trace-Util/lib/Perf/Trace/Util.py
tools/perf/scripts/python/bin/failed-syscalls-by-pid-report
tools/perf/scripts/python/bin/futex-contention-record [new file with mode: 0644]
tools/perf/scripts/python/bin/futex-contention-report [new file with mode: 0644]
tools/perf/scripts/python/bin/netdev-times-report
tools/perf/scripts/python/bin/sched-migration-report
tools/perf/scripts/python/bin/sctop-report
tools/perf/scripts/python/bin/syscall-counts-by-pid-report
tools/perf/scripts/python/bin/syscall-counts-report
tools/perf/scripts/python/failed-syscalls-by-pid.py
tools/perf/scripts/python/futex-contention.py [new file with mode: 0644]
tools/perf/scripts/python/sctop.py
tools/perf/scripts/python/syscall-counts-by-pid.py
tools/perf/scripts/python/syscall-counts.py
tools/perf/util/debug.c
tools/perf/util/debug.h
tools/perf/util/map.h
tools/perf/util/probe-event.c
tools/perf/util/probe-event.h
tools/perf/util/probe-finder.c
tools/perf/util/probe-finder.h
tools/perf/util/ui/browser.c

diff --git a/Documentation/accounting/getdelays.c b/Documentation/accounting/getdelays.c
index 6e25c26..a2976a6 100644
@@ -21,6 +21,7 @@
 #include <sys/types.h>
 #include <sys/stat.h>
 #include <sys/socket.h>
+#include <sys/wait.h>
 #include <signal.h>
 
 #include <linux/genetlink.h>
@@ -266,11 +267,13 @@ int main(int argc, char *argv[])
        int containerset = 0;
        char containerpath[1024];
        int cfd = 0;
+       int forking = 0;
+       sigset_t sigset;
 
        struct msgtemplate msg;
 
-       while (1) {
-               c = getopt(argc, argv, "qdiw:r:m:t:p:vlC:");
+       while (!forking) {
+               c = getopt(argc, argv, "qdiw:r:m:t:p:vlC:c:");
                if (c < 0)
                        break;
 
@@ -319,6 +322,28 @@ int main(int argc, char *argv[])
                                err(1, "Invalid pid\n");
                        cmd_type = TASKSTATS_CMD_ATTR_PID;
                        break;
+               case 'c':
+
+                       /* Block SIGCHLD for sigwait() later */
+                       if (sigemptyset(&sigset) == -1)
+                               err(1, "Failed to empty sigset");
+                       if (sigaddset(&sigset, SIGCHLD))
+                               err(1, "Failed to set sigchld in sigset");
+                       sigprocmask(SIG_BLOCK, &sigset, NULL);
+
+                       /* fork/exec a child */
+                       tid = fork();
+                       if (tid < 0)
+                               err(1, "Fork failed\n");
+                       if (tid == 0)
+                               if (execvp(argv[optind - 1],
+                                   &argv[optind - 1]) < 0)
+                                       exit(-1);
+
+                       /* Set the command type and avoid further processing */
+                       cmd_type = TASKSTATS_CMD_ATTR_PID;
+                       forking = 1;
+                       break;
                case 'v':
                        printf("debug on\n");
                        dbg = 1;
@@ -370,6 +395,15 @@ int main(int argc, char *argv[])
                goto err;
        }
 
+       /*
+        * If we forked a child, wait for it to exit. Cannot use waitpid()
+        * as all the delicious data would be reaped as part of the wait
+        */
+       if (tid && forking) {
+               int sig_received;
+               sigwait(&sigset, &sig_received);
+       }
+
        if (tid) {
                rc = send_cmd(nl_sd, id, mypid, TASKSTATS_CMD_GET,
                              cmd_type, &tid, sizeof(__u32));
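
The comment added in the hunk above explains the ordering: waitpid() would reap the child and with it the delay-accounting data, so the tool blocks SIGCHLD up front and later waits with sigwait() before sending the taskstats query. A minimal, self-contained sketch of that pattern follows; fetch_taskstats() is a hypothetical stand-in for the netlink TASKSTATS_CMD_GET request that getdelays.c actually sends.

/* Minimal sketch of the block-SIGCHLD-then-sigwait() pattern used above.
 * fetch_taskstats() is a hypothetical placeholder for the netlink
 * TASKSTATS_CMD_GET query that getdelays.c performs.
 */
#include <signal.h>
#include <stdio.h>
#include <sys/types.h>
#include <sys/wait.h>
#include <unistd.h>

static void fetch_taskstats(pid_t pid)
{
        /* placeholder: send TASKSTATS_CMD_GET for pid over netlink here */
        printf("would query taskstats for pid %d\n", pid);
}

int main(int argc, char *argv[])
{
        sigset_t sigset;
        int sig_received;
        pid_t child;

        if (argc < 2) {
                fprintf(stderr, "usage: %s command [args...]\n", argv[0]);
                return 1;
        }

        /* Block SIGCHLD so the child's exit stays pending for sigwait() */
        sigemptyset(&sigset);
        sigaddset(&sigset, SIGCHLD);
        sigprocmask(SIG_BLOCK, &sigset, NULL);

        child = fork();
        if (child < 0)
                return 1;
        if (child == 0) {
                execvp(argv[1], &argv[1]);
                _exit(127);
        }

        /* Wait for the child to exit without reaping it ... */
        sigwait(&sigset, &sig_received);

        /* ... so the kernel still holds its accounting data */
        fetch_taskstats(child);

        /* only now is it safe to reap the zombie */
        waitpid(child, NULL, 0);
        return 0;
}
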
diff --git a/Documentation/cgroups/cgroups.txt b/Documentation/cgroups/cgroups.txt
index b34823f..190018b 100644
@@ -18,7 +18,8 @@ CONTENTS:
   1.2 Why are cgroups needed ?
   1.3 How are cgroups implemented ?
   1.4 What does notify_on_release do ?
-  1.5 How do I use cgroups ?
+  1.5 What does clone_children do ?
+  1.6 How do I use cgroups ?
 2. Usage Examples and Syntax
   2.1 Basic Usage
   2.2 Attaching processes
@@ -293,7 +294,16 @@ notify_on_release in the root cgroup at system boot is disabled
 value of their parents notify_on_release setting. The default value of
 a cgroup hierarchy's release_agent path is empty.
 
-1.5 How do I use cgroups ?
+1.5 What does clone_children do ?
+---------------------------------
+
+If the clone_children flag is enabled (1) in a cgroup, then all
+cgroups created beneath will call the post_clone callbacks for each
+subsystem of the newly created cgroup. Usually when this callback is
+implemented for a subsystem, it copies the values of the parent
+subsystem, this is the case for the cpuset.
+
+1.6 How do I use cgroups ?
 --------------------------
 
 To start a new job that is to be contained within a cgroup, using
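
The new 1.5 section describes behaviour rather than a procedure: with clone_children enabled on a cgroup, every cgroup created beneath it runs the per-subsystem post_clone callbacks, which for cpuset copies the parent's values. The sketch below assumes the hierarchy is mounted at /cgroup with the cpuset subsystem and that the flag is exposed as a file named cgroup.clone_children; both are illustrative assumptions, not taken from the patch.

/* Sketch: enable clone_children on a parent cgroup so that cgroups created
 * underneath it start with a copy of the parent's (e.g. cpuset) settings.
 * The /cgroup mount point and the "cgroup.clone_children" file name are
 * assumptions for illustration.
 */
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <sys/stat.h>
#include <sys/types.h>
#include <unistd.h>

static int write_str(const char *path, const char *val)
{
        int fd = open(path, O_WRONLY);

        if (fd < 0)
                return -1;
        if (write(fd, val, strlen(val)) < 0) {
                close(fd);
                return -1;
        }
        return close(fd);
}

int main(void)
{
        /* 1. turn the flag on in the parent cgroup */
        if (write_str("/cgroup/parent/cgroup.clone_children", "1"))
                perror("enable clone_children");

        /* 2. children created from now on run the post_clone callbacks */
        if (mkdir("/cgroup/parent/job1", 0755))
                perror("mkdir job1");

        /* 3. e.g. /cgroup/parent/job1/cpuset.cpus should match the parent */
        return 0;
}
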
diff --git a/Documentation/feature-removal-schedule.txt b/Documentation/feature-removal-schedule.txt
index d2af87b..f3da8c0 100644
@@ -526,6 +526,23 @@ Who:       FUJITA Tomonori <fujita.tomonori@lab.ntt.co.jp>
 
 ----------------------------
 
+What:   namespace cgroup (ns_cgroup)
+When:   2.6.38
+Why:    The ns_cgroup leads to some problems:
+       * cgroup creation is out-of-control
+       * cgroup name can conflict when pids are looping
+       * it is not possible to have a single process handling
+       a lot of namespaces without falling in a exponential creation time
+       * we may want to create a namespace without creating a cgroup
+
+       The ns_cgroup is replaced by a compatibility flag 'clone_children',
+       where a newly created cgroup will copy the parent cgroup values.
+       The userspace has to manually create a cgroup and add a task to
+       the 'tasks' file.
+Who:    Daniel Lezcano <daniel.lezcano@free.fr>
+
+----------------------------
+
 What:  iwlwifi disable_hw_scan module parameters
 When:  2.6.40
 Why:   Hareware scan is the prefer method for iwlwifi devices for
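
The replacement workflow sketched in the ns_cgroup entry — create a cgroup by hand and add a task to its 'tasks' file — can be illustrated with a few lines of C. The /cgroup mount point and the "mytask" group name are assumptions made for the example only.

/* Sketch of the manual replacement for ns_cgroup described above: create a
 * cgroup by hand and attach a task by writing its pid to the "tasks" file.
 * The /cgroup mount point and the "mytask" name are illustrative only.
 */
#include <errno.h>
#include <stdio.h>
#include <sys/stat.h>
#include <sys/types.h>
#include <unistd.h>

int main(void)
{
        FILE *f;

        if (mkdir("/cgroup/mytask", 0755) && errno != EEXIST) {
                perror("mkdir");
                return 1;
        }

        f = fopen("/cgroup/mytask/tasks", "w");
        if (!f) {
                perror("open tasks");
                return 1;
        }
        /* writing a pid to "tasks" moves that task into the cgroup */
        fprintf(f, "%d\n", getpid());
        fclose(f);
        return 0;
}
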
diff --git a/Documentation/filesystems/proc.txt b/Documentation/filesystems/proc.txt
index a563b74..e73df27 100644
@@ -136,6 +136,7 @@ Table 1-1: Process specific entries in /proc
  statm         Process memory status information
  status                Process status in human readable form
  wchan         If CONFIG_KALLSYMS is set, a pre-decoded wchan
+ pagemap       Page table
  stack         Report full stack trace, enable via CONFIG_STACKTRACE
  smaps         a extension based on maps, showing the memory consumption of
                each mapping
@@ -370,6 +371,7 @@ Shared_Dirty:          0 kB
 Private_Clean:         0 kB
 Private_Dirty:         0 kB
 Referenced:          892 kB
+Anonymous:             0 kB
 Swap:                  0 kB
 KernelPageSize:        4 kB
 MMUPageSize:           4 kB
@@ -378,9 +380,15 @@ The first of these lines shows the same information as is displayed for the
 mapping in /proc/PID/maps.  The remaining lines show the size of the mapping
 (size), the amount of the mapping that is currently resident in RAM (RSS), the
 process' proportional share of this mapping (PSS), the number of clean and
-dirty shared pages in the mapping, and the number of clean and dirty private
-pages in the mapping.  The "Referenced" indicates the amount of memory
-currently marked as referenced or accessed.
+dirty private pages in the mapping.  Note that even a page which is part of a
+MAP_SHARED mapping, but has only a single pte mapped, i.e.  is currently used
+by only one process, is accounted as private and not as shared.  "Referenced"
+indicates the amount of memory currently marked as referenced or accessed.
+"Anonymous" shows the amount of memory that does not belong to any file.  Even
+a mapping associated with a file may contain anonymous pages: when MAP_PRIVATE
+and a page is modified, the file page is replaced by a private anonymous copy.
+"Swap" shows how much would-be-anonymous memory is also used, but out on
+swap.
 
 This file is only present if the CONFIG_MMU kernel configuration option is
 enabled.
@@ -397,6 +405,9 @@ To clear the bits for the file mapped pages associated with the process
     > echo 3 > /proc/PID/clear_refs
 Any other value written to /proc/PID/clear_refs will have no effect.
 
+The /proc/pid/pagemap gives the PFN, which can be used to find the pageflags
+using /proc/kpageflags and number of times a page is mapped using
+/proc/kpagecount. For detailed explanation, see Documentation/vm/pagemap.txt.
 
 1.2 Kernel data
 ---------------
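
The added paragraph says /proc/pid/pagemap provides the PFN, which can then be looked up in /proc/kpageflags and /proc/kpagecount. As a rough illustration, the sketch below reads one pagemap entry for the current process, using the entry layout described in Documentation/vm/pagemap.txt (64-bit entries, PFN in bits 0-54, bit 63 set when the page is present).

/* Sketch: look up the PFN backing one virtual address via /proc/self/pagemap,
 * following the entry layout in Documentation/vm/pagemap.txt
 * (64-bit entries; bits 0-54 = PFN, bit 63 = page present).
 */
#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
        long page_size = sysconf(_SC_PAGESIZE);
        int some_data = 42;                      /* any mapped address */
        uintptr_t vaddr = (uintptr_t)&some_data;
        uint64_t entry;
        int fd;

        fd = open("/proc/self/pagemap", O_RDONLY);
        if (fd < 0) {
                perror("open pagemap");
                return 1;
        }
        if (pread(fd, &entry, sizeof(entry),
                  (vaddr / page_size) * sizeof(entry)) != sizeof(entry)) {
                perror("pread");
                return 1;
        }
        close(fd);

        if (entry & (1ULL << 63))                /* page present */
                printf("vaddr %#lx -> PFN %#llx\n", (unsigned long)vaddr,
                       (unsigned long long)(entry & ((1ULL << 55) - 1)));
        else
                printf("page not present\n");
        return 0;
}
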
diff --git a/Documentation/networking/phy.txt b/Documentation/networking/phy.txt
index 88bb71b..9eb1ba5 100644
@@ -177,18 +177,6 @@ Doing it all yourself
  
    A convenience function to print out the PHY status neatly.
 
- int phy_clear_interrupt(struct phy_device *phydev);
- int phy_config_interrupt(struct phy_device *phydev, u32 interrupts);
-   
-   Clear the PHY's interrupt, and configure which ones are allowed,
-   respectively.  Currently only supports all on, or all off.
- int phy_enable_interrupts(struct phy_device *phydev);
- int phy_disable_interrupts(struct phy_device *phydev);
-
-   Functions which enable/disable PHY interrupts, clearing them
-   before and after, respectively.
-
  int phy_start_interrupts(struct phy_device *phydev);
  int phy_stop_interrupts(struct phy_device *phydev);
 
@@ -213,12 +201,6 @@ Doing it all yourself
    Fills the phydev structure with up-to-date information about the current
    settings in the PHY.
 
- void phy_sanitize_settings(struct phy_device *phydev)
-   
-   Resolves differences between currently desired settings, and
-   supported settings for the given PHY device.  Does not make
-   the changes in the hardware, though.
-
  int phy_ethtool_sset(struct phy_device *phydev, struct ethtool_cmd *cmd);
  int phy_ethtool_gset(struct phy_device *phydev, struct ethtool_cmd *cmd);
 
diff --git a/Documentation/sysctl/vm.txt b/Documentation/sysctl/vm.txt
index b606c2c..30289fa 100644
@@ -80,8 +80,10 @@ dirty_background_bytes
 Contains the amount of dirty memory at which the pdflush background writeback
 daemon will start writeback.
 
-If dirty_background_bytes is written, dirty_background_ratio becomes a function
-of its value (dirty_background_bytes / the amount of dirtyable system memory).
+Note: dirty_background_bytes is the counterpart of dirty_background_ratio. Only
+one of them may be specified at a time. When one sysctl is written it is
+immediately taken into account to evaluate the dirty memory limits and the
+other appears as 0 when read.
 
 ==============================================================
 
@@ -97,8 +99,10 @@ dirty_bytes
 Contains the amount of dirty memory at which a process generating disk writes
 will itself start writeback.
 
-If dirty_bytes is written, dirty_ratio becomes a function of its value
-(dirty_bytes / the amount of dirtyable system memory).
+Note: dirty_bytes is the counterpart of dirty_ratio. Only one of them may be
+specified at a time. When one sysctl is written it is immediately taken into
+account to evaluate the dirty memory limits and the other appears as 0 when
+read.
 
 Note: the minimum value allowed for dirty_bytes is two pages (in bytes); any
 value lower than this limit will be ignored and the old configuration will be
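
The rewritten text states that each bytes/ratio pair is mutually exclusive: writing one sysctl takes effect immediately and makes its counterpart read back as 0. A small sketch of that behaviour (root is needed to write the sysctl) is given below.

/* Sketch illustrating the counterpart behaviour described above: after
 * writing dirty_background_bytes, dirty_background_ratio reads back as 0.
 */
#include <stdio.h>

int main(void)
{
        FILE *f;
        char buf[32];

        f = fopen("/proc/sys/vm/dirty_background_bytes", "w");
        if (!f) {
                perror("write dirty_background_bytes");
                return 1;
        }
        fputs("16777216\n", f);        /* 16 MB */
        fclose(f);

        f = fopen("/proc/sys/vm/dirty_background_ratio", "r");
        if (f && fgets(buf, sizeof(buf), f))
                printf("dirty_background_ratio now reads: %s", buf);
        if (f)
                fclose(f);
        return 0;
}
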
diff --git a/arch/alpha/kernel/pci_iommu.c b/arch/alpha/kernel/pci_iommu.c
index d1dbd9a..022c274 100644
@@ -223,7 +223,7 @@ iommu_arena_free(struct pci_iommu_arena *arena, long ofs, long n)
  */
 static int pci_dac_dma_supported(struct pci_dev *dev, u64 mask)
 {
-       dma64_addr_t dac_offset = alpha_mv.pci_dac_offset;
+       dma_addr_t dac_offset = alpha_mv.pci_dac_offset;
        int ok = 1;
 
        /* If this is not set, the machine doesn't support DAC at all.  */
@@ -756,7 +756,7 @@ static void alpha_pci_unmap_sg(struct device *dev, struct scatterlist *sg,
        spin_lock_irqsave(&arena->lock, flags);
 
        for (end = sg + nents; sg < end; ++sg) {
-               dma64_addr_t addr;
+               dma_addr_t addr;
                size_t size;
                long npages, ofs;
                dma_addr_t tend;
diff --git a/arch/alpha/kernel/ptrace.c b/arch/alpha/kernel/ptrace.c
index baa9036..e2af5eb 100644
@@ -269,7 +269,8 @@ void ptrace_disable(struct task_struct *child)
        user_disable_single_step(child);
 }
 
-long arch_ptrace(struct task_struct *child, long request, long addr, long data)
+long arch_ptrace(struct task_struct *child, long request,
+                unsigned long addr, unsigned long data)
 {
        unsigned long tmp;
        size_t copied;
@@ -292,7 +293,7 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data)
        case PTRACE_PEEKUSR:
                force_successful_syscall_return();
                ret = get_reg(child, addr);
-               DBG(DBG_MEM, ("peek $%ld->%#lx\n", addr, ret));
+               DBG(DBG_MEM, ("peek $%lu->%#lx\n", addr, ret));
                break;
 
        /* When I and D space are separate, this will have to be fixed.  */
@@ -302,7 +303,7 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data)
                break;
 
        case PTRACE_POKEUSR: /* write the specified register */
-               DBG(DBG_MEM, ("poke $%ld<-%#lx\n", addr, data));
+               DBG(DBG_MEM, ("poke $%lu<-%#lx\n", addr, data));
                ret = put_reg(child, addr, data);
                break;
        default:
diff --git a/arch/arm/kernel/ptrace.c b/arch/arm/kernel/ptrace.c
index e0cb637..3e97483 100644
@@ -1075,13 +1075,15 @@ out:
 }
 #endif
 
-long arch_ptrace(struct task_struct *child, long request, long addr, long data)
+long arch_ptrace(struct task_struct *child, long request,
+                unsigned long addr, unsigned long data)
 {
        int ret;
+       unsigned long __user *datap = (unsigned long __user *) data;
 
        switch (request) {
                case PTRACE_PEEKUSR:
-                       ret = ptrace_read_user(child, addr, (unsigned long __user *)data);
+                       ret = ptrace_read_user(child, addr, datap);
                        break;
 
                case PTRACE_POKEUSR:
@@ -1089,34 +1091,34 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data)
                        break;
 
                case PTRACE_GETREGS:
-                       ret = ptrace_getregs(child, (void __user *)data);
+                       ret = ptrace_getregs(child, datap);
                        break;
 
                case PTRACE_SETREGS:
-                       ret = ptrace_setregs(child, (void __user *)data);
+                       ret = ptrace_setregs(child, datap);
                        break;
 
                case PTRACE_GETFPREGS:
-                       ret = ptrace_getfpregs(child, (void __user *)data);
+                       ret = ptrace_getfpregs(child, datap);
                        break;
                
                case PTRACE_SETFPREGS:
-                       ret = ptrace_setfpregs(child, (void __user *)data);
+                       ret = ptrace_setfpregs(child, datap);
                        break;
 
 #ifdef CONFIG_IWMMXT
                case PTRACE_GETWMMXREGS:
-                       ret = ptrace_getwmmxregs(child, (void __user *)data);
+                       ret = ptrace_getwmmxregs(child, datap);
                        break;
 
                case PTRACE_SETWMMXREGS:
-                       ret = ptrace_setwmmxregs(child, (void __user *)data);
+                       ret = ptrace_setwmmxregs(child, datap);
                        break;
 #endif
 
                case PTRACE_GET_THREAD_AREA:
                        ret = put_user(task_thread_info(child)->tp_value,
-                                      (unsigned long __user *) data);
+                                      datap);
                        break;
 
                case PTRACE_SET_SYSCALL:
@@ -1126,21 +1128,21 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data)
 
 #ifdef CONFIG_CRUNCH
                case PTRACE_GETCRUNCHREGS:
-                       ret = ptrace_getcrunchregs(child, (void __user *)data);
+                       ret = ptrace_getcrunchregs(child, datap);
                        break;
 
                case PTRACE_SETCRUNCHREGS:
-                       ret = ptrace_setcrunchregs(child, (void __user *)data);
+                       ret = ptrace_setcrunchregs(child, datap);
                        break;
 #endif
 
 #ifdef CONFIG_VFP
                case PTRACE_GETVFPREGS:
-                       ret = ptrace_getvfpregs(child, (void __user *)data);
+                       ret = ptrace_getvfpregs(child, datap);
                        break;
 
                case PTRACE_SETVFPREGS:
-                       ret = ptrace_setvfpregs(child, (void __user *)data);
+                       ret = ptrace_setvfpregs(child, datap);
                        break;
 #endif
 
diff --git a/arch/arm/mach-tegra/timer.c b/arch/arm/mach-tegra/timer.c
index 2f42021..9057d6f 100644
@@ -27,7 +27,6 @@
 #include <linux/io.h>
 #include <linux/cnt32_to_63.h>
 
-#include <asm/mach/time.h>
 #include <asm/mach/time.h>
 #include <asm/localtimer.h>
 
diff --git a/arch/arm/mm/highmem.c b/arch/arm/mm/highmem.c
index c00f119..c435fd9 100644
@@ -89,7 +89,7 @@ void __kunmap_atomic(void *kvaddr)
        int idx, type;
 
        if (kvaddr >= (void *)FIXADDR_START) {
-               type = kmap_atomic_idx_pop();
+               type = kmap_atomic_idx();
                idx = type + KM_TYPE_NR * smp_processor_id();
 
                if (cache_is_vivt())
@@ -101,6 +101,7 @@ void __kunmap_atomic(void *kvaddr)
 #else
                (void) idx;  /* to kill a warning */
 #endif
+               kmap_atomic_idx_pop();
        } else if (vaddr >= PKMAP_ADDR(0) && vaddr < PKMAP_ADDR(LAST_PKMAP)) {
                /* this address was obtained through kmap_high_get() */
                kunmap_high(pte_page(pkmap_page_table[PKMAP_NR(vaddr)]));
diff --git a/arch/arm/plat-nomadik/include/plat/ste_dma40.h b/arch/arm/plat-nomadik/include/plat/ste_dma40.h
index 5fbde4b..93a8126 100644
@@ -14,7 +14,6 @@
 #include <linux/dmaengine.h>
 #include <linux/workqueue.h>
 #include <linux/interrupt.h>
-#include <linux/dmaengine.h>
 
 /* dev types for memcpy */
 #define STEDMA40_DEV_DST_MEMORY (-1)
diff --git a/arch/avr32/kernel/ptrace.c b/arch/avr32/kernel/ptrace.c
index 5e73c25..4aedcab 100644
@@ -146,9 +146,11 @@ static int ptrace_setregs(struct task_struct *tsk, const void __user *uregs)
        return ret;
 }
 
-long arch_ptrace(struct task_struct *child, long request, long addr, long data)
+long arch_ptrace(struct task_struct *child, long request,
+                unsigned long addr, unsigned long data)
 {
        int ret;
+       void __user *datap = (void __user *) data;
 
        switch (request) {
        /* Read the word at location addr in the child process */
@@ -158,8 +160,7 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data)
                break;
 
        case PTRACE_PEEKUSR:
-               ret = ptrace_read_user(child, addr,
-                                      (unsigned long __user *)data);
+               ret = ptrace_read_user(child, addr, datap);
                break;
 
        /* Write the word in data at location addr */
@@ -173,11 +174,11 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data)
                break;
 
        case PTRACE_GETREGS:
-               ret = ptrace_getregs(child, (void __user *)data);
+               ret = ptrace_getregs(child, datap);
                break;
 
        case PTRACE_SETREGS:
-               ret = ptrace_setregs(child, (const void __user *)data);
+               ret = ptrace_setregs(child, datap);
                break;
 
        default:
diff --git a/arch/blackfin/kernel/ptrace.c b/arch/blackfin/kernel/ptrace.c
index b358393..75089f8 100644
  * Get contents of register REGNO in task TASK.
  */
 static inline long
-get_reg(struct task_struct *task, long regno, unsigned long __user *datap)
+get_reg(struct task_struct *task, unsigned long regno,
+       unsigned long __user *datap)
 {
        long tmp;
        struct pt_regs *regs = task_pt_regs(task);
 
-       if (regno & 3 || regno > PT_LAST_PSEUDO || regno < 0)
+       if (regno & 3 || regno > PT_LAST_PSEUDO)
                return -EIO;
 
        switch (regno) {
@@ -74,11 +75,11 @@ get_reg(struct task_struct *task, long regno, unsigned long __user *datap)
  * Write contents of register REGNO in task TASK.
  */
 static inline int
-put_reg(struct task_struct *task, long regno, unsigned long data)
+put_reg(struct task_struct *task, unsigned long regno, unsigned long data)
 {
        struct pt_regs *regs = task_pt_regs(task);
 
-       if (regno & 3 || regno > PT_LAST_PSEUDO || regno < 0)
+       if (regno & 3 || regno > PT_LAST_PSEUDO)
                return -EIO;
 
        switch (regno) {
@@ -240,7 +241,8 @@ void user_disable_single_step(struct task_struct *child)
        clear_tsk_thread_flag(child, TIF_SINGLESTEP);
 }
 
-long arch_ptrace(struct task_struct *child, long request, long addr, long data)
+long arch_ptrace(struct task_struct *child, long request,
+                unsigned long addr, unsigned long data)
 {
        int ret;
        unsigned long __user *datap = (unsigned long __user *)data;
@@ -368,14 +370,14 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data)
                return copy_regset_to_user(child, &user_bfin_native_view,
                                           REGSET_GENERAL,
                                           0, sizeof(struct pt_regs),
-                                          (void __user *)data);
+                                          datap);
 
        case PTRACE_SETREGS:
                pr_debug("ptrace: PTRACE_SETREGS\n");
                return copy_regset_from_user(child, &user_bfin_native_view,
                                             REGSET_GENERAL,
                                             0, sizeof(struct pt_regs),
-                                            (const void __user *)data);
+                                            datap);
 
        case_default:
        default:
diff --git a/arch/cris/arch-v10/kernel/ptrace.c b/arch/cris/arch-v10/kernel/ptrace.c
index e70c804..320065f 100644
@@ -76,9 +76,11 @@ ptrace_disable(struct task_struct *child)
  * (in user space) where the result of the ptrace call is written (instead of
  * being returned).
  */
-long arch_ptrace(struct task_struct *child, long request, long addr, long data)
+long arch_ptrace(struct task_struct *child, long request,
+                unsigned long addr, unsigned long data)
 {
        int ret;
+       unsigned int regno = addr >> 2;
        unsigned long __user *datap = (unsigned long __user *)data;
 
        switch (request) {
@@ -93,10 +95,10 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data)
                        unsigned long tmp;
 
                        ret = -EIO;
-                       if ((addr & 3) || addr < 0 || addr > PT_MAX << 2)
+                       if ((addr & 3) || regno > PT_MAX)
                                break;
 
-                       tmp = get_reg(child, addr >> 2);
+                       tmp = get_reg(child, regno);
                        ret = put_user(tmp, datap);
                        break;
                }
@@ -110,19 +112,17 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data)
                /* Write the word at location address in the USER area. */
                case PTRACE_POKEUSR:
                        ret = -EIO;
-                       if ((addr & 3) || addr < 0 || addr > PT_MAX << 2)
+                       if ((addr & 3) || regno > PT_MAX)
                                break;
 
-                       addr >>= 2;
-
-                       if (addr == PT_DCCR) {
+                       if (regno == PT_DCCR) {
                                /* don't allow the tracing process to change stuff like
                                 * interrupt enable, kernel/user bit, dma enables etc.
                                 */
                                data &= DCCR_MASK;
                                data |= get_reg(child, PT_DCCR) & ~DCCR_MASK;
                        }
-                       if (put_reg(child, addr, data))
+                       if (put_reg(child, regno, data))
                                break;
                        ret = 0;
                        break;
@@ -141,7 +141,7 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data)
                                        break;
                                }
                                
-                               data += sizeof(long);
+                               datap++;
                        }
 
                        break;
@@ -165,7 +165,7 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data)
                                }
                                
                                put_reg(child, i, tmp);
-                               data += sizeof(long);
+                               datap++;
                        }
                        
                        break;
diff --git a/arch/cris/arch-v32/kernel/ptrace.c b/arch/cris/arch-v32/kernel/ptrace.c
index f4ebd1e..511ece9 100644
@@ -126,9 +126,11 @@ ptrace_disable(struct task_struct *child)
 }
 
 
-long arch_ptrace(struct task_struct *child, long request, long addr, long data)
+long arch_ptrace(struct task_struct *child, long request,
+                unsigned long addr, unsigned long data)
 {
        int ret;
+       unsigned int regno = addr >> 2;
        unsigned long __user *datap = (unsigned long __user *)data;
 
        switch (request) {
@@ -163,10 +165,10 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data)
                        unsigned long tmp;
 
                        ret = -EIO;
-                       if ((addr & 3) || addr < 0 || addr > PT_MAX << 2)
+                       if ((addr & 3) || regno > PT_MAX)
                                break;
 
-                       tmp = get_reg(child, addr >> 2);
+                       tmp = get_reg(child, regno);
                        ret = put_user(tmp, datap);
                        break;
                }
@@ -180,19 +182,17 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data)
                /* Write the word at location address in the USER area. */
                case PTRACE_POKEUSR:
                        ret = -EIO;
-                       if ((addr & 3) || addr < 0 || addr > PT_MAX << 2)
+                       if ((addr & 3) || regno > PT_MAX)
                                break;
 
-                       addr >>= 2;
-
-                       if (addr == PT_CCS) {
+                       if (regno == PT_CCS) {
                                /* don't allow the tracing process to change stuff like
                                 * interrupt enable, kernel/user bit, dma enables etc.
                                 */
                                data &= CCS_MASK;
                                data |= get_reg(child, PT_CCS) & ~CCS_MASK;
                        }
-                       if (put_reg(child, addr, data))
+                       if (put_reg(child, regno, data))
                                break;
                        ret = 0;
                        break;
diff --git a/arch/frv/kernel/ptrace.c b/arch/frv/kernel/ptrace.c
index fac0289..9d68f7f 100644
@@ -254,23 +254,26 @@ void ptrace_disable(struct task_struct *child)
        user_disable_single_step(child);
 }
 
-long arch_ptrace(struct task_struct *child, long request, long addr, long data)
+long arch_ptrace(struct task_struct *child, long request,
+                unsigned long addr, unsigned long data)
 {
        unsigned long tmp;
        int ret;
+       int regno = addr >> 2;
+       unsigned long __user *datap = (unsigned long __user *) data;
 
        switch (request) {
                /* read the word at location addr in the USER area. */
        case PTRACE_PEEKUSR: {
                tmp = 0;
                ret = -EIO;
-               if ((addr & 3) || addr < 0)
+               if (addr & 3)
                        break;
 
                ret = 0;
-               switch (addr >> 2) {
+               switch (regno) {
                case 0 ... PT__END - 1:
-                       tmp = get_reg(child, addr >> 2);
+                       tmp = get_reg(child, regno);
                        break;
 
                case PT__END + 0:
@@ -299,23 +302,18 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data)
                }
 
                if (ret == 0)
-                       ret = put_user(tmp, (unsigned long *) data);
+                       ret = put_user(tmp, datap);
                break;
        }
 
        case PTRACE_POKEUSR: /* write the word at location addr in the USER area */
                ret = -EIO;
-               if ((addr & 3) || addr < 0)
+               if (addr & 3)
                        break;
 
-               ret = 0;
-               switch (addr >> 2) {
+               switch (regno) {
                case 0 ... PT__END - 1:
-                       ret = put_reg(child, addr >> 2, data);
-                       break;
-
-               default:
-                       ret = -EIO;
+                       ret = put_reg(child, regno, data);
                        break;
                }
                break;
@@ -324,25 +322,25 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data)
                return copy_regset_to_user(child, &user_frv_native_view,
                                           REGSET_GENERAL,
                                           0, sizeof(child->thread.user->i),
-                                          (void __user *)data);
+                                          datap);
 
        case PTRACE_SETREGS:    /* Set all integer regs in the child. */
                return copy_regset_from_user(child, &user_frv_native_view,
                                             REGSET_GENERAL,
                                             0, sizeof(child->thread.user->i),
-                                            (const void __user *)data);
+                                            datap);
 
        case PTRACE_GETFPREGS:  /* Get the child FP/Media state. */
                return copy_regset_to_user(child, &user_frv_native_view,
                                           REGSET_FPMEDIA,
                                           0, sizeof(child->thread.user->f),
-                                          (void __user *)data);
+                                          datap);
 
        case PTRACE_SETFPREGS:  /* Set the child FP/Media state. */
                return copy_regset_from_user(child, &user_frv_native_view,
                                             REGSET_FPMEDIA,
                                             0, sizeof(child->thread.user->f),
-                                            (const void __user *)data);
+                                            datap);
 
        default:
                ret = ptrace_request(child, request, addr, data);
diff --git a/arch/frv/mm/highmem.c b/arch/frv/mm/highmem.c
index 61088dc..fd7fcd4 100644
@@ -68,7 +68,7 @@ EXPORT_SYMBOL(__kmap_atomic);
 
 void __kunmap_atomic(void *kvaddr)
 {
-       int type = kmap_atomic_idx_pop();
+       int type = kmap_atomic_idx();
        switch (type) {
        case 0:         __kunmap_atomic_primary(4, 6);  break;
        case 1:         __kunmap_atomic_primary(5, 7);  break;
@@ -83,6 +83,7 @@ void __kunmap_atomic(void *kvaddr)
        default:
                BUG();
        }
+       kmap_atomic_idx_pop();
        pagefault_enable();
 }
 EXPORT_SYMBOL(__kunmap_atomic);
diff --git a/arch/h8300/kernel/ptrace.c b/arch/h8300/kernel/ptrace.c
index df11412..497fa89 100644
@@ -50,27 +50,29 @@ void ptrace_disable(struct task_struct *child)
        user_disable_single_step(child);
 }
 
-long arch_ptrace(struct task_struct *child, long request, long addr, long data)
+long arch_ptrace(struct task_struct *child, long request,
+                unsigned long addr, unsigned long data)
 {
        int ret;
+       int regno = addr >> 2;
+       unsigned long __user *datap = (unsigned long __user *) data;
 
        switch (request) {
        /* read the word at location addr in the USER area. */
                case PTRACE_PEEKUSR: {
                        unsigned long tmp = 0;
                        
-                       if ((addr & 3) || addr < 0 || addr >= sizeof(struct user)) {
+                       if ((addr & 3) || addr >= sizeof(struct user)) {
                                ret = -EIO;
                                break ;
                        }
                        
                        ret = 0;  /* Default return condition */
-                       addr = addr >> 2; /* temporary hack. */
 
-                       if (addr < H8300_REGS_NO)
-                               tmp = h8300_get_reg(child, addr);
+                       if (regno < H8300_REGS_NO)
+                               tmp = h8300_get_reg(child, regno);
                        else {
-                               switch(addr) {
+                               switch (regno) {
                                case 49:
                                        tmp = child->mm->start_code;
                                        break ;
@@ -88,24 +90,23 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data)
                                }
                        }
                        if (!ret)
-                               ret = put_user(tmp,(unsigned long *) data);
+                               ret = put_user(tmp, datap);
                        break ;
                }
 
       /* when I and D space are separate, this will have to be fixed. */
                case PTRACE_POKEUSR: /* write the word at location addr in the USER area */
-                       if ((addr & 3) || addr < 0 || addr >= sizeof(struct user)) {
+                       if ((addr & 3) || addr >= sizeof(struct user)) {
                                ret = -EIO;
                                break ;
                        }
-                       addr = addr >> 2; /* temporary hack. */
                            
-                       if (addr == PT_ORIG_ER0) {
+                       if (regno == PT_ORIG_ER0) {
                                ret = -EIO;
                                break ;
                        }
-                       if (addr < H8300_REGS_NO) {
-                               ret = h8300_put_reg(child, addr, data);
+                       if (regno < H8300_REGS_NO) {
+                               ret = h8300_put_reg(child, regno, data);
                                break ;
                        }
                        ret = -EIO;
@@ -116,11 +117,11 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data)
                        unsigned long tmp;
                        for (i = 0; i < H8300_REGS_NO; i++) {
                            tmp = h8300_get_reg(child, i);
-                           if (put_user(tmp, (unsigned long *) data)) {
+                           if (put_user(tmp, datap)) {
                                ret = -EFAULT;
                                break;
                            }
-                           data += sizeof(long);
+                           datap++;
                        }
                        ret = 0;
                        break;
@@ -130,12 +131,12 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data)
                        int i;
                        unsigned long tmp;
                        for (i = 0; i < H8300_REGS_NO; i++) {
-                           if (get_user(tmp, (unsigned long *) data)) {
+                           if (get_user(tmp, datap)) {
                                ret = -EFAULT;
                                break;
                            }
                            h8300_put_reg(child, i, tmp);
-                           data += sizeof(long);
+                           datap++;
                        }
                        ret = 0;
                        break;
diff --git a/arch/ia64/include/asm/cputime.h b/arch/ia64/include/asm/cputime.h
index 7fa8a85..6073b18 100644
@@ -56,10 +56,10 @@ typedef u64 cputime64_t;
 #define jiffies64_to_cputime64(__jif)  ((__jif) * (NSEC_PER_SEC / HZ))
 
 /*
- * Convert cputime <-> milliseconds
+ * Convert cputime <-> microseconds
  */
-#define cputime_to_msecs(__ct)         ((__ct) / NSEC_PER_MSEC)
-#define msecs_to_cputime(__msecs)      ((__msecs) * NSEC_PER_MSEC)
+#define cputime_to_usecs(__ct)         ((__ct) / NSEC_PER_USEC)
+#define usecs_to_cputime(__usecs)      ((__usecs) * NSEC_PER_USEC)
 
 /*
  * Convert cputime <-> seconds
diff --git a/arch/ia64/kernel/ptrace.c b/arch/ia64/kernel/ptrace.c
index 7c7909f..8848f43 100644
@@ -1177,7 +1177,8 @@ ptrace_disable (struct task_struct *child)
 }
 
 long
-arch_ptrace (struct task_struct *child, long request, long addr, long data)
+arch_ptrace (struct task_struct *child, long request,
+            unsigned long addr, unsigned long data)
 {
        switch (request) {
        case PTRACE_PEEKTEXT:
diff --git a/arch/m32r/kernel/ptrace.c b/arch/m32r/kernel/ptrace.c
index 0021ade..2074375 100644
@@ -622,9 +622,11 @@ void ptrace_disable(struct task_struct *child)
 }
 
 long
-arch_ptrace(struct task_struct *child, long request, long addr, long data)
+arch_ptrace(struct task_struct *child, long request,
+           unsigned long addr, unsigned long data)
 {
        int ret;
+       unsigned long __user *datap = (unsigned long __user *) data;
 
        switch (request) {
        /*
@@ -639,8 +641,7 @@ arch_ptrace(struct task_struct *child, long request, long addr, long data)
         * read the word at location addr in the USER area.
         */
        case PTRACE_PEEKUSR:
-               ret = ptrace_read_user(child, addr,
-                                      (unsigned long __user *)data);
+               ret = ptrace_read_user(child, addr, datap);
                break;
 
        /*
@@ -661,11 +662,11 @@ arch_ptrace(struct task_struct *child, long request, long addr, long data)
                break;
 
        case PTRACE_GETREGS:
-               ret = ptrace_getregs(child, (void __user *)data);
+               ret = ptrace_getregs(child, datap);
                break;
 
        case PTRACE_SETREGS:
-               ret = ptrace_setregs(child, (void __user *)data);
+               ret = ptrace_setregs(child, datap);
                break;
 
        default:
diff --git a/arch/m68k/kernel/ptrace.c b/arch/m68k/kernel/ptrace.c
index 616e597..0b25268 100644
@@ -156,55 +156,57 @@ void user_disable_single_step(struct task_struct *child)
        singlestep_disable(child);
 }
 
-long arch_ptrace(struct task_struct *child, long request, long addr, long data)
+long arch_ptrace(struct task_struct *child, long request,
+                unsigned long addr, unsigned long data)
 {
        unsigned long tmp;
        int i, ret = 0;
+       int regno = addr >> 2; /* temporary hack. */
+       unsigned long __user *datap = (unsigned long __user *) data;
 
        switch (request) {
        /* read the word at location addr in the USER area. */
        case PTRACE_PEEKUSR:
                if (addr & 3)
                        goto out_eio;
-               addr >>= 2;     /* temporary hack. */
 
-               if (addr >= 0 && addr < 19) {
-                       tmp = get_reg(child, addr);
-               } else if (addr >= 21 && addr < 49) {
-                       tmp = child->thread.fp[addr - 21];
+               if (regno >= 0 && regno < 19) {
+                       tmp = get_reg(child, regno);
+               } else if (regno >= 21 && regno < 49) {
+                       tmp = child->thread.fp[regno - 21];
                        /* Convert internal fpu reg representation
                         * into long double format
                         */
-                       if (FPU_IS_EMU && (addr < 45) && !(addr % 3))
+                       if (FPU_IS_EMU && (regno < 45) && !(regno % 3))
                                tmp = ((tmp & 0xffff0000) << 15) |
                                      ((tmp & 0x0000ffff) << 16);
                } else
                        goto out_eio;
-               ret = put_user(tmp, (unsigned long *)data);
+               ret = put_user(tmp, datap);
                break;
 
-       case PTRACE_POKEUSR:    /* write the word at location addr in the USER area */
+       case PTRACE_POKEUSR:
+       /* write the word at location addr in the USER area */
                if (addr & 3)
                        goto out_eio;
-               addr >>= 2;     /* temporary hack. */
 
-               if (addr == PT_SR) {
+               if (regno == PT_SR) {
                        data &= SR_MASK;
                        data |= get_reg(child, PT_SR) & ~SR_MASK;
                }
-               if (addr >= 0 && addr < 19) {
-                       if (put_reg(child, addr, data))
+               if (regno >= 0 && regno < 19) {
+                       if (put_reg(child, regno, data))
                                goto out_eio;
-               } else if (addr >= 21 && addr < 48) {
+               } else if (regno >= 21 && regno < 48) {
                        /* Convert long double format
                         * into internal fpu reg representation
                         */
-                       if (FPU_IS_EMU && (addr < 45) && !(addr % 3)) {
-                               data = (unsigned long)data << 15;
+                       if (FPU_IS_EMU && (regno < 45) && !(regno % 3)) {
+                               data <<= 15;
                                data = (data & 0xffff0000) |
                                       ((data & 0x0000ffff) >> 1);
                        }
-                       child->thread.fp[addr - 21] = data;
+                       child->thread.fp[regno - 21] = data;
                } else
                        goto out_eio;
                break;
@@ -212,16 +214,16 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data)
        case PTRACE_GETREGS:    /* Get all gp regs from the child. */
                for (i = 0; i < 19; i++) {
                        tmp = get_reg(child, i);
-                       ret = put_user(tmp, (unsigned long *)data);
+                       ret = put_user(tmp, datap);
                        if (ret)
                                break;
-                       data += sizeof(long);
+                       datap++;
                }
                break;
 
        case PTRACE_SETREGS:    /* Set all gp regs in the child. */
                for (i = 0; i < 19; i++) {
-                       ret = get_user(tmp, (unsigned long *)data);
+                       ret = get_user(tmp, datap);
                        if (ret)
                                break;
                        if (i == PT_SR) {
@@ -229,25 +231,24 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data)
                                tmp |= get_reg(child, PT_SR) & ~SR_MASK;
                        }
                        put_reg(child, i, tmp);
-                       data += sizeof(long);
+                       datap++;
                }
                break;
 
        case PTRACE_GETFPREGS:  /* Get the child FPU state. */
-               if (copy_to_user((void *)data, &child->thread.fp,
+               if (copy_to_user(datap, &child->thread.fp,
                                 sizeof(struct user_m68kfp_struct)))
                        ret = -EFAULT;
                break;
 
        case PTRACE_SETFPREGS:  /* Set the child FPU state. */
-               if (copy_from_user(&child->thread.fp, (void *)data,
+               if (copy_from_user(&child->thread.fp, datap,
                                   sizeof(struct user_m68kfp_struct)))
                        ret = -EFAULT;
                break;
 
        case PTRACE_GET_THREAD_AREA:
-               ret = put_user(task_thread_info(child)->tp_value,
-                              (unsigned long __user *)data);
+               ret = put_user(task_thread_info(child)->tp_value, datap);
                break;
 
        default:
diff --git a/arch/m68knommu/kernel/ptrace.c b/arch/m68knommu/kernel/ptrace.c
index 6fe7c38..6709fb7 100644
@@ -112,9 +112,12 @@ void ptrace_disable(struct task_struct *child)
        user_disable_single_step(child);
 }
 
-long arch_ptrace(struct task_struct *child, long request, long addr, long data)
+long arch_ptrace(struct task_struct *child, long request,
+                unsigned long addr, unsigned long data)
 {
        int ret;
+       int regno = addr >> 2;
+       unsigned long __user *datap = (unsigned long __user *) data;
 
        switch (request) {
                /* read the word at location addr in the USER area. */
@@ -122,53 +125,48 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data)
                        unsigned long tmp;
                        
                        ret = -EIO;
-                       if ((addr & 3) || addr < 0 ||
-                           addr > sizeof(struct user) - 3)
+                       if ((addr & 3) || addr > sizeof(struct user) - 3)
                                break;
                        
                        tmp = 0;  /* Default return condition */
-                       addr = addr >> 2; /* temporary hack. */
                        ret = -EIO;
-                       if (addr < 19) {
-                               tmp = get_reg(child, addr);
-                               if (addr == PT_SR)
+                       if (regno < 19) {
+                               tmp = get_reg(child, regno);
+                               if (regno == PT_SR)
                                        tmp >>= 16;
-                       } else if (addr >= 21 && addr < 49) {
-                               tmp = child->thread.fp[addr - 21];
-                       } else if (addr == 49) {
+                       } else if (regno >= 21 && regno < 49) {
+                               tmp = child->thread.fp[regno - 21];
+                       } else if (regno == 49) {
                                tmp = child->mm->start_code;
-                       } else if (addr == 50) {
+                       } else if (regno == 50) {
                                tmp = child->mm->start_data;
-                       } else if (addr == 51) {
+                       } else if (regno == 51) {
                                tmp = child->mm->end_code;
                        } else
                                break;
-                       ret = put_user(tmp,(unsigned long *) data);
+                       ret = put_user(tmp, datap);
                        break;
                }
 
                case PTRACE_POKEUSR: /* write the word at location addr in the USER area */
                        ret = -EIO;
-                       if ((addr & 3) || addr < 0 ||
-                           addr > sizeof(struct user) - 3)
+                       if ((addr & 3) || addr > sizeof(struct user) - 3)
                                break;
 
-                       addr = addr >> 2; /* temporary hack. */
-                           
-                       if (addr == PT_SR) {
+                       if (regno == PT_SR) {
                                data &= SR_MASK;
                                data <<= 16;
                                data |= get_reg(child, PT_SR) & ~(SR_MASK << 16);
                        }
-                       if (addr < 19) {
-                               if (put_reg(child, addr, data))
+                       if (regno < 19) {
+                               if (put_reg(child, regno, data))
                                        break;
                                ret = 0;
                                break;
                        }
-                       if (addr >= 21 && addr < 48)
+                       if (regno >= 21 && regno < 48)
                        {
-                               child->thread.fp[addr - 21] = data;
+                               child->thread.fp[regno - 21] = data;
                                ret = 0;
                        }
                        break;
@@ -180,11 +178,11 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data)
                            tmp = get_reg(child, i);
                            if (i == PT_SR)
                                tmp >>= 16;
-                           if (put_user(tmp, (unsigned long *) data)) {
+                           if (put_user(tmp, datap)) {
                                ret = -EFAULT;
                                break;
                            }
-                           data += sizeof(long);
+                           datap++;
                        }
                        ret = 0;
                        break;
@@ -194,7 +192,7 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data)
                        int i;
                        unsigned long tmp;
                        for (i = 0; i < 19; i++) {
-                           if (get_user(tmp, (unsigned long *) data)) {
+                           if (get_user(tmp, datap)) {
                                ret = -EFAULT;
                                break;
                            }
@@ -204,7 +202,7 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data)
                                tmp |= get_reg(child, PT_SR) & ~(SR_MASK << 16);
                            }
                            put_reg(child, i, tmp);
-                           data += sizeof(long);
+                           datap++;
                        }
                        ret = 0;
                        break;
@@ -213,7 +211,7 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data)
 #ifdef PTRACE_GETFPREGS
                case PTRACE_GETFPREGS: { /* Get the child FPU state. */
                        ret = 0;
-                       if (copy_to_user((void *)data, &child->thread.fp,
+                       if (copy_to_user(datap, &child->thread.fp,
                                         sizeof(struct user_m68kfp_struct)))
                                ret = -EFAULT;
                        break;
@@ -223,7 +221,7 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data)
 #ifdef PTRACE_SETFPREGS
                case PTRACE_SETFPREGS: { /* Set the child FPU state. */
                        ret = 0;
-                       if (copy_from_user(&child->thread.fp, (void *)data,
+                       if (copy_from_user(&child->thread.fp, datap,
                                           sizeof(struct user_m68kfp_struct)))
                                ret = -EFAULT;
                        break;
@@ -231,8 +229,7 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data)
 #endif
 
        case PTRACE_GET_THREAD_AREA:
-               ret = put_user(task_thread_info(child)->tp_value,
-                              (unsigned long __user *)data);
+               ret = put_user(task_thread_info(child)->tp_value, datap);
                break;
 
                default:
index dc03ffc..05ac8cc 100644 (file)
@@ -73,7 +73,8 @@ static microblaze_reg_t *reg_save_addr(unsigned reg_offs,
        return (microblaze_reg_t *)((char *)regs + reg_offs);
 }
 
-long arch_ptrace(struct task_struct *child, long request, long addr, long data)
+long arch_ptrace(struct task_struct *child, long request,
+                unsigned long addr, unsigned long data)
 {
        int rval;
        unsigned long val = 0;
@@ -99,7 +100,7 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data)
                        } else {
                                rval = -EIO;
                        }
-               } else if (addr >= 0 && addr < PT_SIZE && (addr & 0x3) == 0) {
+               } else if (addr < PT_SIZE && (addr & 0x3) == 0) {
                        microblaze_reg_t *reg_addr = reg_save_addr(addr, child);
                        if (request == PTRACE_PEEKUSR)
                                val = *reg_addr;
index 5f4b9d4..f1f508e 100644 (file)
@@ -839,7 +839,7 @@ struct bridge_controller {
        nasid_t                 nasid;
        unsigned int            widget_id;
        unsigned int            irq_cpu;
-       dma64_addr_t            baddr;
+       u64                     baddr;
        unsigned int            pci_int[8];
 };
 
index c877733..d21c388 100644 (file)
@@ -255,9 +255,13 @@ int ptrace_set_watch_regs(struct task_struct *child,
        return 0;
 }
 
-long arch_ptrace(struct task_struct *child, long request, long addr, long data)
+long arch_ptrace(struct task_struct *child, long request,
+                unsigned long addr, unsigned long data)
 {
        int ret;
+       void __user *addrp = (void __user *) addr;
+       void __user *datavp = (void __user *) data;
+       unsigned long __user *datalp = (void __user *) data;
 
        switch (request) {
        /* when I and D space are separate, these will need to be fixed. */
@@ -386,7 +390,7 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data)
                        ret = -EIO;
                        goto out;
                }
-               ret = put_user(tmp, (unsigned long __user *) data);
+               ret = put_user(tmp, datalp);
                break;
        }
 
@@ -478,34 +482,31 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data)
                }
 
        case PTRACE_GETREGS:
-               ret = ptrace_getregs(child, (__s64 __user *) data);
+               ret = ptrace_getregs(child, datavp);
                break;
 
        case PTRACE_SETREGS:
-               ret = ptrace_setregs(child, (__s64 __user *) data);
+               ret = ptrace_setregs(child, datavp);
                break;
 
        case PTRACE_GETFPREGS:
-               ret = ptrace_getfpregs(child, (__u32 __user *) data);
+               ret = ptrace_getfpregs(child, datavp);
                break;
 
        case PTRACE_SETFPREGS:
-               ret = ptrace_setfpregs(child, (__u32 __user *) data);
+               ret = ptrace_setfpregs(child, datavp);
                break;
 
        case PTRACE_GET_THREAD_AREA:
-               ret = put_user(task_thread_info(child)->tp_value,
-                               (unsigned long __user *) data);
+               ret = put_user(task_thread_info(child)->tp_value, datalp);
                break;
 
        case PTRACE_GET_WATCH_REGS:
-               ret = ptrace_get_watch_regs(child,
-                                       (struct pt_watch_regs __user *) addr);
+               ret = ptrace_get_watch_regs(child, addrp);
                break;
 
        case PTRACE_SET_WATCH_REGS:
-               ret = ptrace_set_watch_regs(child,
-                                       (struct pt_watch_regs __user *) addr);
+               ret = ptrace_set_watch_regs(child, addrp);
                break;
 
        default:
index 1e69b1f..3634c7e 100644 (file)
@@ -74,7 +74,7 @@ void __kunmap_atomic(void *kvaddr)
                return;
        }
 
-       type = kmap_atomic_idx_pop();
+       type = kmap_atomic_idx();
 #ifdef CONFIG_DEBUG_HIGHMEM
        {
                int idx = type + KM_TYPE_NR * smp_processor_id();
@@ -89,6 +89,7 @@ void __kunmap_atomic(void *kvaddr)
                local_flush_tlb_one(vaddr);
        }
 #endif
+       kmap_atomic_idx_pop();
        pagefault_enable();
 }
 EXPORT_SYMBOL(__kunmap_atomic);
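
This hunk, and the matching ones below for mn10300, powerpc, sparc and tile, changes __kunmap_atomic() to read the current stack slot with kmap_atomic_idx() and to pop it only after the PTE has been cleared and the TLB entry flushed. The ordering appears intended to keep the slot reserved for the whole teardown, so a nested kmap_atomic() (for example from an interrupt) cannot reuse the same index while its mapping is still being torn down. A tiny self-contained C sketch of the peek-then-pop ordering; the names only mimic kmap_atomic_idx()/kmap_atomic_idx_pop() and are not kernel code:

    #include <assert.h>
    #include <stdio.h>

    #define NR_SLOTS 4

    static int slot_stack[NR_SLOTS];
    static int depth;

    static int slot_push(void)              /* like kmap_atomic_idx_push() */
    {
            assert(depth < NR_SLOTS);
            slot_stack[depth] = depth;
            return slot_stack[depth++];
    }

    static int slot_peek(void)              /* like kmap_atomic_idx() */
    {
            return slot_stack[depth - 1];
    }

    static void slot_pop(void)              /* like kmap_atomic_idx_pop() */
    {
            depth--;
    }

    static void unmap(void)
    {
            int idx = slot_peek();          /* slot still owned here ...   */
            printf("tearing down slot %d\n", idx);
            /* ... clear PTE, flush TLB for this slot ... */
            slot_pop();                     /* ... and only now released   */
    }

    int main(void)
    {
            slot_push();
            unmap();
            return 0;
    }
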
index 3817d9f..bfe2d88 100644 (file)
@@ -101,7 +101,7 @@ static inline void __kunmap_atomic(unsigned long vaddr)
                return;
        }
 
-       type = kmap_atomic_idx_pop();
+       type = kmap_atomic_idx();
 
 #if HIGHMEM_DEBUG
        {
@@ -119,6 +119,8 @@ static inline void __kunmap_atomic(unsigned long vaddr)
                local_flush_tlb_one(vaddr);
        }
 #endif
+
+       kmap_atomic_idx_pop();
        pagefault_enable();
 }
 #endif /* __KERNEL__ */
index cf847da..5c0b07e 100644 (file)
@@ -295,31 +295,31 @@ void ptrace_disable(struct task_struct *child)
 /*
  * handle the arch-specific side of process tracing
  */
-long arch_ptrace(struct task_struct *child, long request, long addr, long data)
+long arch_ptrace(struct task_struct *child, long request,
+                unsigned long addr, unsigned long data)
 {
        unsigned long tmp;
        int ret;
+       unsigned long __user *datap = (unsigned long __user *) data;
 
        switch (request) {
        /* read the word at location addr in the USER area. */
        case PTRACE_PEEKUSR:
                ret = -EIO;
-               if ((addr & 3) || addr < 0 ||
-                   addr > sizeof(struct user) - 3)
+               if ((addr & 3) || addr > sizeof(struct user) - 3)
                        break;
 
                tmp = 0;  /* Default return condition */
                if (addr < NR_PTREGS << 2)
                        tmp = get_stack_long(child,
                                             ptrace_regid_to_frame[addr]);
-               ret = put_user(tmp, (unsigned long *) data);
+               ret = put_user(tmp, datap);
                break;
 
                /* write the word at location addr in the USER area */
        case PTRACE_POKEUSR:
                ret = -EIO;
-               if ((addr & 3) || addr < 0 ||
-                   addr > sizeof(struct user) - 3)
+               if ((addr & 3) || addr > sizeof(struct user) - 3)
                        break;
 
                ret = 0;
@@ -332,25 +332,25 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data)
                return copy_regset_to_user(child, &user_mn10300_native_view,
                                           REGSET_GENERAL,
                                           0, NR_PTREGS * sizeof(long),
-                                          (void __user *)data);
+                                          datap);
 
        case PTRACE_SETREGS:    /* Set all integer regs in the child. */
                return copy_regset_from_user(child, &user_mn10300_native_view,
                                             REGSET_GENERAL,
                                             0, NR_PTREGS * sizeof(long),
-                                            (const void __user *)data);
+                                            datap);
 
        case PTRACE_GETFPREGS:  /* Get the child FPU state. */
                return copy_regset_to_user(child, &user_mn10300_native_view,
                                           REGSET_FPU,
                                           0, sizeof(struct fpu_state_struct),
-                                          (void __user *)data);
+                                          datap);
 
        case PTRACE_SETFPREGS:  /* Set the child FPU state. */
                return copy_regset_from_user(child, &user_mn10300_native_view,
                                             REGSET_FPU,
                                             0, sizeof(struct fpu_state_struct),
-                                            (const void __user *)data);
+                                            datap);
 
        default:
                ret = ptrace_request(child, request, addr, data);
index c4f49e4..2905b1f 100644 (file)
@@ -110,7 +110,8 @@ void user_enable_block_step(struct task_struct *task)
        pa_psw(task)->l = 0;
 }
 
-long arch_ptrace(struct task_struct *child, long request, long addr, long data)
+long arch_ptrace(struct task_struct *child, long request,
+                unsigned long addr, unsigned long data)
 {
        unsigned long tmp;
        long ret = -EIO;
@@ -120,11 +121,11 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data)
        /* Read the word at location addr in the USER area.  For ptraced
           processes, the kernel saves all regs on a syscall. */
        case PTRACE_PEEKUSR:
-               if ((addr & (sizeof(long)-1)) ||
-                   (unsigned long) addr >= sizeof(struct pt_regs))
+               if ((addr & (sizeof(unsigned long)-1)) ||
+                    addr >= sizeof(struct pt_regs))
                        break;
                tmp = *(unsigned long *) ((char *) task_regs(child) + addr);
-               ret = put_user(tmp, (unsigned long *) data);
+               ret = put_user(tmp, (unsigned long __user *) data);
                break;
 
        /* Write the word at location addr in the USER area.  This will need
@@ -151,8 +152,8 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data)
                        break;
                }
 
-               if ((addr & (sizeof(long)-1)) ||
-                   (unsigned long) addr >= sizeof(struct pt_regs))
+               if ((addr & (sizeof(unsigned long)-1)) ||
+                    addr >= sizeof(struct pt_regs))
                        break;
                if ((addr >= PT_GR1 && addr <= PT_GR31) ||
                                addr == PT_IAOQ0 || addr == PT_IAOQ1 ||
index 8bdc6a9..1cf20bd 100644 (file)
@@ -124,23 +124,23 @@ static inline u64 cputime64_to_jiffies64(const cputime_t ct)
 }
 
 /*
- * Convert cputime <-> milliseconds
+ * Convert cputime <-> microseconds
  */
 extern u64 __cputime_msec_factor;
 
-static inline unsigned long cputime_to_msecs(const cputime_t ct)
+static inline unsigned long cputime_to_usecs(const cputime_t ct)
 {
-       return mulhdu(ct, __cputime_msec_factor);
+       return mulhdu(ct, __cputime_msec_factor) * USEC_PER_MSEC;
 }
 
-static inline cputime_t msecs_to_cputime(const unsigned long ms)
+static inline cputime_t usecs_to_cputime(const unsigned long us)
 {
        cputime_t ct;
        unsigned long sec;
 
        /* have to be a little careful about overflow */
-       ct = ms % 1000;
-       sec = ms / 1000;
+       ct = us % 1000000;
+       sec = us / 1000000;
        if (ct) {
                ct *= tb_ticks_per_sec;
-               do_div(ct, 1000);
+               do_div(ct, 1000000);
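
These helpers switch the external unit from milliseconds to microseconds while keeping the existing millisecond factor (hence the extra * USEC_PER_MSEC on the way out). In the reverse direction the remainder is now a microsecond count, so the timebase scaling divides by 1,000,000 rather than 1,000. A standalone sketch of the same overflow-careful split; TB_TICKS_PER_SEC is a made-up frequency used only for illustration:

    #include <stdio.h>
    #include <stdint.h>

    /* Illustrative timebase frequency; the real value comes from hardware. */
    #define TB_TICKS_PER_SEC 512000000ULL

    /* Convert microseconds to timebase ticks without overflowing the
     * intermediate product: split into whole seconds and a sub-second rest. */
    static uint64_t usecs_to_ticks(unsigned long us)
    {
            uint64_t ticks = 0;
            unsigned long rem = us % 1000000;
            unsigned long sec = us / 1000000;

            if (rem)
                    ticks = (uint64_t)rem * TB_TICKS_PER_SEC / 1000000;
            ticks += (uint64_t)sec * TB_TICKS_PER_SEC;
            return ticks;
    }

    int main(void)
    {
            /* 1.5 s -> 768000000 ticks with the frequency assumed above. */
            printf("%llu ticks for 1500000 us\n",
                   (unsigned long long)usecs_to_ticks(1500000));
            return 0;
    }
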
index 286d978..a9b3296 100644 (file)
@@ -1406,37 +1406,42 @@ static long ppc_del_hwdebug(struct task_struct *child, long addr, long data)
  * Here are the old "legacy" powerpc specific getregs/setregs ptrace calls,
  * we mark them as obsolete now, they will be removed in a future version
  */
-static long arch_ptrace_old(struct task_struct *child, long request, long addr,
-                           long data)
+static long arch_ptrace_old(struct task_struct *child, long request,
+                           unsigned long addr, unsigned long data)
 {
+       void __user *datavp = (void __user *) data;
+
        switch (request) {
        case PPC_PTRACE_GETREGS:        /* Get GPRs 0 - 31. */
                return copy_regset_to_user(child, &user_ppc_native_view,
                                           REGSET_GPR, 0, 32 * sizeof(long),
-                                          (void __user *) data);
+                                          datavp);
 
        case PPC_PTRACE_SETREGS:        /* Set GPRs 0 - 31. */
                return copy_regset_from_user(child, &user_ppc_native_view,
                                             REGSET_GPR, 0, 32 * sizeof(long),
-                                            (const void __user *) data);
+                                            datavp);
 
        case PPC_PTRACE_GETFPREGS:      /* Get FPRs 0 - 31. */
                return copy_regset_to_user(child, &user_ppc_native_view,
                                           REGSET_FPR, 0, 32 * sizeof(double),
-                                          (void __user *) data);
+                                          datavp);
 
        case PPC_PTRACE_SETFPREGS:      /* Set FPRs 0 - 31. */
                return copy_regset_from_user(child, &user_ppc_native_view,
                                             REGSET_FPR, 0, 32 * sizeof(double),
-                                            (const void __user *) data);
+                                            datavp);
        }
 
        return -EPERM;
 }
 
-long arch_ptrace(struct task_struct *child, long request, long addr, long data)
+long arch_ptrace(struct task_struct *child, long request,
+                unsigned long addr, unsigned long data)
 {
        int ret = -EPERM;
+       void __user *datavp = (void __user *) data;
+       unsigned long __user *datalp = datavp;
 
        switch (request) {
        /* read the word at location addr in the USER area. */
@@ -1446,11 +1451,11 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data)
                ret = -EIO;
                /* convert to index and check */
 #ifdef CONFIG_PPC32
-               index = (unsigned long) addr >> 2;
+               index = addr >> 2;
                if ((addr & 3) || (index > PT_FPSCR)
                    || (child->thread.regs == NULL))
 #else
-               index = (unsigned long) addr >> 3;
+               index = addr >> 3;
                if ((addr & 7) || (index > PT_FPSCR))
 #endif
                        break;
@@ -1463,7 +1468,7 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data)
                        tmp = ((unsigned long *)child->thread.fpr)
                                [TS_FPRWIDTH * (index - PT_FPR0)];
                }
-               ret = put_user(tmp,(unsigned long __user *) data);
+               ret = put_user(tmp, datalp);
                break;
        }
 
@@ -1474,11 +1479,11 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data)
                ret = -EIO;
                /* convert to index and check */
 #ifdef CONFIG_PPC32
-               index = (unsigned long) addr >> 2;
+               index = addr >> 2;
                if ((addr & 3) || (index > PT_FPSCR)
                    || (child->thread.regs == NULL))
 #else
-               index = (unsigned long) addr >> 3;
+               index = addr >> 3;
                if ((addr & 7) || (index > PT_FPSCR))
 #endif
                        break;
@@ -1525,11 +1530,11 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data)
                dbginfo.features = 0;
 #endif /* CONFIG_PPC_ADV_DEBUG_REGS */
 
-               if (!access_ok(VERIFY_WRITE, data,
+               if (!access_ok(VERIFY_WRITE, datavp,
                               sizeof(struct ppc_debug_info)))
                        return -EFAULT;
-               ret = __copy_to_user((struct ppc_debug_info __user *)data,
-                                    &dbginfo, sizeof(struct ppc_debug_info)) ?
+               ret = __copy_to_user(datavp, &dbginfo,
+                                    sizeof(struct ppc_debug_info)) ?
                      -EFAULT : 0;
                break;
        }
@@ -1537,11 +1542,10 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data)
        case PPC_PTRACE_SETHWDEBUG: {
                struct ppc_hw_breakpoint bp_info;
 
-               if (!access_ok(VERIFY_READ, data,
+               if (!access_ok(VERIFY_READ, datavp,
                               sizeof(struct ppc_hw_breakpoint)))
                        return -EFAULT;
-               ret = __copy_from_user(&bp_info,
-                                      (struct ppc_hw_breakpoint __user *)data,
+               ret = __copy_from_user(&bp_info, datavp,
                                       sizeof(struct ppc_hw_breakpoint)) ?
                      -EFAULT : 0;
                if (!ret)
@@ -1560,11 +1564,9 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data)
                if (addr > 0)
                        break;
 #ifdef CONFIG_PPC_ADV_DEBUG_REGS
-               ret = put_user(child->thread.dac1,
-                              (unsigned long __user *)data);
+               ret = put_user(child->thread.dac1, datalp);
 #else
-               ret = put_user(child->thread.dabr,
-                              (unsigned long __user *)data);
+               ret = put_user(child->thread.dabr, datalp);
 #endif
                break;
        }
@@ -1580,7 +1582,7 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data)
                return copy_regset_to_user(child, &user_ppc_native_view,
                                           REGSET_GPR,
                                           0, sizeof(struct pt_regs),
-                                          (void __user *) data);
+                                          datavp);
 
 #ifdef CONFIG_PPC64
        case PTRACE_SETREGS64:
@@ -1589,19 +1591,19 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data)
                return copy_regset_from_user(child, &user_ppc_native_view,
                                             REGSET_GPR,
                                             0, sizeof(struct pt_regs),
-                                            (const void __user *) data);
+                                            datavp);
 
        case PTRACE_GETFPREGS: /* Get the child FPU state (FPR0...31 + FPSCR) */
                return copy_regset_to_user(child, &user_ppc_native_view,
                                           REGSET_FPR,
                                           0, sizeof(elf_fpregset_t),
-                                          (void __user *) data);
+                                          datavp);
 
        case PTRACE_SETFPREGS: /* Set the child FPU state (FPR0...31 + FPSCR) */
                return copy_regset_from_user(child, &user_ppc_native_view,
                                             REGSET_FPR,
                                             0, sizeof(elf_fpregset_t),
-                                            (const void __user *) data);
+                                            datavp);
 
 #ifdef CONFIG_ALTIVEC
        case PTRACE_GETVRREGS:
@@ -1609,40 +1611,40 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data)
                                           REGSET_VMX,
                                           0, (33 * sizeof(vector128) +
                                               sizeof(u32)),
-                                          (void __user *) data);
+                                          datavp);
 
        case PTRACE_SETVRREGS:
                return copy_regset_from_user(child, &user_ppc_native_view,
                                             REGSET_VMX,
                                             0, (33 * sizeof(vector128) +
                                                 sizeof(u32)),
-                                            (const void __user *) data);
+                                            datavp);
 #endif
 #ifdef CONFIG_VSX
        case PTRACE_GETVSRREGS:
                return copy_regset_to_user(child, &user_ppc_native_view,
                                           REGSET_VSX,
                                           0, 32 * sizeof(double),
-                                          (void __user *) data);
+                                          datavp);
 
        case PTRACE_SETVSRREGS:
                return copy_regset_from_user(child, &user_ppc_native_view,
                                             REGSET_VSX,
                                             0, 32 * sizeof(double),
-                                            (const void __user *) data);
+                                            datavp);
 #endif
 #ifdef CONFIG_SPE
        case PTRACE_GETEVRREGS:
                /* Get the child spe register state. */
                return copy_regset_to_user(child, &user_ppc_native_view,
                                           REGSET_SPE, 0, 35 * sizeof(u32),
-                                          (void __user *) data);
+                                          datavp);
 
        case PTRACE_SETEVRREGS:
                /* Set the child spe register state. */
                return copy_regset_from_user(child, &user_ppc_native_view,
                                             REGSET_SPE, 0, 35 * sizeof(u32),
-                                            (const void __user *) data);
+                                            datavp);
 #endif
 
-       /* Old reverse args ptrace callss */
+       /* Old reverse args ptrace calls */
index b0848b4..e7450bd 100644 (file)
@@ -62,7 +62,7 @@ void __kunmap_atomic(void *kvaddr)
                return;
        }
 
-       type = kmap_atomic_idx_pop();
+       type = kmap_atomic_idx();
 
 #ifdef CONFIG_DEBUG_HIGHMEM
        {
@@ -79,6 +79,8 @@ void __kunmap_atomic(void *kvaddr)
                local_flush_tlb_page(NULL, vaddr);
        }
 #endif
+
+       kmap_atomic_idx_pop();
        pagefault_enable();
 }
 EXPORT_SYMBOL(__kunmap_atomic);
index 4127636..9725369 100644 (file)
@@ -50,6 +50,7 @@
 #define RIO_ATMU_REGS_OFFSET   0x10c00
 #define RIO_P_MSG_REGS_OFFSET  0x11000
 #define RIO_S_MSG_REGS_OFFSET  0x13000
+#define RIO_GCCSR              0x13c
 #define RIO_ESCSR              0x158
 #define RIO_CCSR               0x15c
 #define RIO_LTLEDCSR           0x0608
@@ -87,6 +88,9 @@
 #define RIO_IPWSR_PWD          0x00000008
 #define RIO_IPWSR_PWB          0x00000004
 
+#define RIO_EPWISR_PINT                0x80000000
+#define RIO_EPWISR_PW          0x00000001
+
 #define RIO_MSG_DESC_SIZE      32
 #define RIO_MSG_BUFFER_SIZE    4096
 #define RIO_MIN_TX_RING_SIZE   2
@@ -1082,18 +1086,12 @@ fsl_rio_port_write_handler(int irq, void *dev_instance)
        struct rio_priv *priv = port->priv;
        u32 epwisr, tmp;
 
-       ipwmr = in_be32(&priv->msg_regs->pwmr);
-       ipwsr = in_be32(&priv->msg_regs->pwsr);
-
        epwisr = in_be32(priv->regs_win + RIO_EPWISR);
-       if (epwisr & 0x80000000) {
-               tmp = in_be32(priv->regs_win + RIO_LTLEDCSR);
-               pr_info("RIO_LTLEDCSR = 0x%x\n", tmp);
-               out_be32(priv->regs_win + RIO_LTLEDCSR, 0);
-       }
+       if (!(epwisr & RIO_EPWISR_PW))
+               goto pw_done;
 
-       if (!(epwisr & 0x00000001))
-               return IRQ_HANDLED;
+       ipwmr = in_be32(&priv->msg_regs->pwmr);
+       ipwsr = in_be32(&priv->msg_regs->pwsr);
 
 #ifdef DEBUG_PW
        pr_debug("PW Int->IPWMR: 0x%08x IPWSR: 0x%08x (", ipwmr, ipwsr);
@@ -1109,20 +1107,6 @@ fsl_rio_port_write_handler(int irq, void *dev_instance)
                pr_debug(" PWB");
        pr_debug(" )\n");
 #endif
-       out_be32(&priv->msg_regs->pwsr,
-                ipwsr & (RIO_IPWSR_TE | RIO_IPWSR_QFI | RIO_IPWSR_PWD));
-
-       if ((ipwmr & RIO_IPWMR_EIE) && (ipwsr & RIO_IPWSR_TE)) {
-               priv->port_write_msg.err_count++;
-               pr_info("RIO: Port-Write Transaction Err (%d)\n",
-                        priv->port_write_msg.err_count);
-       }
-       if (ipwsr & RIO_IPWSR_PWD) {
-               priv->port_write_msg.discard_count++;
-               pr_info("RIO: Port Discarded Port-Write Msg(s) (%d)\n",
-                        priv->port_write_msg.discard_count);
-       }
-
        /* Schedule deferred processing if PW was received */
        if (ipwsr & RIO_IPWSR_QFI) {
                /* Save PW message (if there is room in FIFO),
@@ -1134,16 +1118,43 @@ fsl_rio_port_write_handler(int irq, void *dev_instance)
                                 RIO_PW_MSG_SIZE);
                } else {
                        priv->port_write_msg.discard_count++;
-                       pr_info("RIO: ISR Discarded Port-Write Msg(s) (%d)\n",
+                       pr_debug("RIO: ISR Discarded Port-Write Msg(s) (%d)\n",
                                 priv->port_write_msg.discard_count);
                }
+               /* Clear interrupt and issue Clear Queue command. This allows
+                * another port-write to be received.
+                */
+               out_be32(&priv->msg_regs->pwsr, RIO_IPWSR_QFI);
+               out_be32(&priv->msg_regs->pwmr, ipwmr | RIO_IPWMR_CQ);
+
                schedule_work(&priv->pw_work);
        }
 
-       /* Issue Clear Queue command. This allows another
-        * port-write to be received.
-        */
-       out_be32(&priv->msg_regs->pwmr, ipwmr | RIO_IPWMR_CQ);
+       if ((ipwmr & RIO_IPWMR_EIE) && (ipwsr & RIO_IPWSR_TE)) {
+               priv->port_write_msg.err_count++;
+               pr_debug("RIO: Port-Write Transaction Err (%d)\n",
+                        priv->port_write_msg.err_count);
+               /* Clear Transaction Error: port-write controller should be
+                * disabled when clearing this error
+                */
+               out_be32(&priv->msg_regs->pwmr, ipwmr & ~RIO_IPWMR_PWE);
+               out_be32(&priv->msg_regs->pwsr, RIO_IPWSR_TE);
+               out_be32(&priv->msg_regs->pwmr, ipwmr);
+       }
+
+       if (ipwsr & RIO_IPWSR_PWD) {
+               priv->port_write_msg.discard_count++;
+               pr_debug("RIO: Port Discarded Port-Write Msg(s) (%d)\n",
+                        priv->port_write_msg.discard_count);
+               out_be32(&priv->msg_regs->pwsr, RIO_IPWSR_PWD);
+       }
+
+pw_done:
+       if (epwisr & RIO_EPWISR_PINT) {
+               tmp = in_be32(priv->regs_win + RIO_LTLEDCSR);
+               pr_debug("RIO_LTLEDCSR = 0x%x\n", tmp);
+               out_be32(priv->regs_win + RIO_LTLEDCSR, 0);
+       }
 
        return IRQ_HANDLED;
 }
@@ -1461,6 +1472,7 @@ int fsl_rio_setup(struct platform_device *dev)
        port->host_deviceid = fsl_rio_get_hdid(port->id);
 
        port->priv = priv;
+       port->phys_efptr = 0x100;
        rio_register_mport(port);
 
        priv->regs_win = ioremap(regs.start, regs.end - regs.start + 1);
@@ -1508,6 +1520,12 @@ int fsl_rio_setup(struct platform_device *dev)
        dev_info(&dev->dev, "RapidIO Common Transport System size: %d\n",
                        port->sys_size ? 65536 : 256);
 
+       if (port->host_deviceid >= 0)
+               out_be32(priv->regs_win + RIO_GCCSR, RIO_PORT_GEN_HOST |
+                       RIO_PORT_GEN_MASTER | RIO_PORT_GEN_DISCOVERED);
+       else
+               out_be32(priv->regs_win + RIO_GCCSR, 0x00000000);
+
        priv->atmu_regs = (struct rio_atmu_regs *)(priv->regs_win
                                        + RIO_ATMU_REGS_OFFSET);
        priv->maint_atmu_regs = priv->atmu_regs + 1;
index 8b1a52a..40e2ab0 100644 (file)
@@ -73,18 +73,18 @@ cputime64_to_jiffies64(cputime64_t cputime)
 }
 
 /*
- * Convert cputime to milliseconds and back.
+ * Convert cputime to microseconds and back.
  */
 static inline unsigned int
-cputime_to_msecs(const cputime_t cputime)
+cputime_to_usecs(const cputime_t cputime)
 {
-       return cputime_div(cputime, 4096000);
+       return cputime_div(cputime, 4096);
 }
 
 static inline cputime_t
-msecs_to_cputime(const unsigned int m)
+usecs_to_cputime(const unsigned int m)
 {
-       return (cputime_t) m * 4096000;
+       return (cputime_t) m * 4096;
 }
 
 /*
index 83339d3..019bb71 100644 (file)
@@ -343,7 +343,8 @@ poke_user(struct task_struct *child, addr_t addr, addr_t data)
        return __poke_user(child, addr, data);
 }
 
-long arch_ptrace(struct task_struct *child, long request, long addr, long data)
+long arch_ptrace(struct task_struct *child, long request,
+                unsigned long addr, unsigned long data)
 {
        ptrace_area parea; 
        int copied, ret;
index 174c642..5583618 100644 (file)
@@ -325,7 +325,8 @@ void ptrace_disable(struct task_struct *child)
 }
 
 long
-arch_ptrace(struct task_struct *child, long request, long addr, long data)
+arch_ptrace(struct task_struct *child, long request,
+           unsigned long addr, unsigned long data)
 {
        int ret;
        unsigned long __user *datap = (void __user *)data;
@@ -335,14 +336,14 @@ arch_ptrace(struct task_struct *child, long request, long addr, long data)
                ret = copy_regset_to_user(child, &user_score_native_view,
                                                REGSET_GENERAL,
                                                0, sizeof(struct pt_regs),
-                                               (void __user *)datap);
+                                               datap);
                break;
 
        case PTRACE_SETREGS:
                ret = copy_regset_from_user(child, &user_score_native_view,
                                                REGSET_GENERAL,
                                                0, sizeof(struct pt_regs),
-                                               (const void __user *)datap);
+                                               datap);
                break;
 
        default:
index 2cd42b5..90a15d2 100644 (file)
@@ -365,9 +365,9 @@ const struct user_regset_view *task_user_regset_view(struct task_struct *task)
        return &user_sh_native_view;
 }
 
-long arch_ptrace(struct task_struct *child, long request, long addr, long data)
+long arch_ptrace(struct task_struct *child, long request,
+                unsigned long addr, unsigned long data)
 {
-       struct user * dummy = NULL;
        unsigned long __user *datap = (unsigned long __user *)data;
        int ret;
 
@@ -383,17 +383,20 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data)
 
                if (addr < sizeof(struct pt_regs))
                        tmp = get_stack_long(child, addr);
-               else if (addr >= (long) &dummy->fpu &&
-                        addr < (long) &dummy->u_fpvalid) {
+               else if (addr >= offsetof(struct user, fpu) &&
+                        addr < offsetof(struct user, u_fpvalid)) {
                        if (!tsk_used_math(child)) {
-                               if (addr == (long)&dummy->fpu.fpscr)
+                               if (addr == offsetof(struct user, fpu.fpscr))
                                        tmp = FPSCR_INIT;
                                else
                                        tmp = 0;
-                       } else
-                               tmp = ((long *)child->thread.xstate)
-                                       [(addr - (long)&dummy->fpu) >> 2];
-               } else if (addr == (long) &dummy->u_fpvalid)
+                       } else {
+                               unsigned long index;
+                               index = addr - offsetof(struct user, fpu);
+                               tmp = ((unsigned long *)child->thread.xstate)
+                                       [index >> 2];
+                       }
+               } else if (addr == offsetof(struct user, u_fpvalid))
                        tmp = !!tsk_used_math(child);
                else if (addr == PT_TEXT_ADDR)
                        tmp = child->mm->start_code;
@@ -417,13 +420,15 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data)
 
                if (addr < sizeof(struct pt_regs))
                        ret = put_stack_long(child, addr, data);
-               else if (addr >= (long) &dummy->fpu &&
-                        addr < (long) &dummy->u_fpvalid) {
+               else if (addr >= offsetof(struct user, fpu) &&
+                        addr < offsetof(struct user, u_fpvalid)) {
+                       unsigned long index;
+                       index = addr - offsetof(struct user, fpu);
                        set_stopped_child_used_math(child);
-                       ((long *)child->thread.xstate)
-                               [(addr - (long)&dummy->fpu) >> 2] = data;
+                       ((unsigned long *)child->thread.xstate)
+                               [index >> 2] = data;
                        ret = 0;
-               } else if (addr == (long) &dummy->u_fpvalid) {
+               } else if (addr == offsetof(struct user, u_fpvalid)) {
                        conditional_stopped_child_used_math(data, child);
                        ret = 0;
                }
@@ -433,35 +438,35 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data)
                return copy_regset_to_user(child, &user_sh_native_view,
                                           REGSET_GENERAL,
                                           0, sizeof(struct pt_regs),
-                                          (void __user *)data);
+                                          datap);
        case PTRACE_SETREGS:
                return copy_regset_from_user(child, &user_sh_native_view,
                                             REGSET_GENERAL,
                                             0, sizeof(struct pt_regs),
-                                            (const void __user *)data);
+                                            datap);
 #ifdef CONFIG_SH_FPU
        case PTRACE_GETFPREGS:
                return copy_regset_to_user(child, &user_sh_native_view,
                                           REGSET_FPU,
                                           0, sizeof(struct user_fpu_struct),
-                                          (void __user *)data);
+                                          datap);
        case PTRACE_SETFPREGS:
                return copy_regset_from_user(child, &user_sh_native_view,
                                             REGSET_FPU,
                                             0, sizeof(struct user_fpu_struct),
-                                            (const void __user *)data);
+                                            datap);
 #endif
 #ifdef CONFIG_SH_DSP
        case PTRACE_GETDSPREGS:
                return copy_regset_to_user(child, &user_sh_native_view,
                                           REGSET_DSP,
                                           0, sizeof(struct pt_dspregs),
-                                          (void __user *)data);
+                                          datap);
        case PTRACE_SETDSPREGS:
                return copy_regset_from_user(child, &user_sh_native_view,
                                             REGSET_DSP,
                                             0, sizeof(struct pt_dspregs),
-                                            (const void __user *)data);
+                                            datap);
 #endif
        default:
                ret = ptrace_request(child, request, addr, data);
index e0fb065..4436eac 100644 (file)
@@ -383,9 +383,11 @@ const struct user_regset_view *task_user_regset_view(struct task_struct *task)
        return &user_sh64_native_view;
 }
 
-long arch_ptrace(struct task_struct *child, long request, long addr, long data)
+long arch_ptrace(struct task_struct *child, long request,
+                unsigned long addr, unsigned long data)
 {
        int ret;
+       unsigned long __user *datap = (unsigned long __user *) data;
 
        switch (request) {
        /* read the word at location addr in the USER area. */
@@ -400,13 +402,15 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data)
                        tmp = get_stack_long(child, addr);
                else if ((addr >= offsetof(struct user, fpu)) &&
                         (addr <  offsetof(struct user, u_fpvalid))) {
-                       tmp = get_fpu_long(child, addr - offsetof(struct user, fpu));
+                       unsigned long index;
+                       index = addr - offsetof(struct user, fpu);
+                       tmp = get_fpu_long(child, index);
                } else if (addr == offsetof(struct user, u_fpvalid)) {
                        tmp = !!tsk_used_math(child);
                } else {
                        break;
                }
-               ret = put_user(tmp, (unsigned long *)data);
+               ret = put_user(tmp, datap);
                break;
        }
 
@@ -437,7 +441,9 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data)
                }
                else if ((addr >= offsetof(struct user, fpu)) &&
                         (addr <  offsetof(struct user, u_fpvalid))) {
-                       ret = put_fpu_long(child, addr - offsetof(struct user, fpu), data);
+                       unsigned long index;
+                       index = addr - offsetof(struct user, fpu);
+                       ret = put_fpu_long(child, index, data);
                }
                break;
 
@@ -445,23 +451,23 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data)
                return copy_regset_to_user(child, &user_sh64_native_view,
                                           REGSET_GENERAL,
                                           0, sizeof(struct pt_regs),
-                                          (void __user *)data);
+                                          datap);
        case PTRACE_SETREGS:
                return copy_regset_from_user(child, &user_sh64_native_view,
                                             REGSET_GENERAL,
                                             0, sizeof(struct pt_regs),
-                                            (const void __user *)data);
+                                            datap);
 #ifdef CONFIG_SH_FPU
        case PTRACE_GETFPREGS:
                return copy_regset_to_user(child, &user_sh64_native_view,
                                           REGSET_FPU,
                                           0, sizeof(struct user_fpu_struct),
-                                          (void __user *)data);
+                                          datap);
        case PTRACE_SETFPREGS:
                return copy_regset_from_user(child, &user_sh64_native_view,
                                             REGSET_FPU,
                                             0, sizeof(struct user_fpu_struct),
-                                            (const void __user *)data);
+                                            datap);
 #endif
        default:
                ret = ptrace_request(child, request, addr, data);
@@ -471,7 +477,8 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data)
        return ret;
 }
 
-asmlinkage int sh64_ptrace(long request, long pid, long addr, long data)
+asmlinkage int sh64_ptrace(long request, long pid,
+                          unsigned long addr, unsigned long data)
 {
 #define WPC_DBRMODE 0x0d104008
        static unsigned long first_call;
index 2889574..c2ced21 100644 (file)
@@ -207,6 +207,21 @@ _memset_io(volatile void __iomem *dst, int c, __kernel_size_t n)
 
 #define memset_io(d,c,sz)      _memset_io(d,c,sz)
 
+static inline void
+_sbus_memcpy_fromio(void *dst, const volatile void __iomem *src,
+                   __kernel_size_t n)
+{
+       char *d = dst;
+
+       while (n--) {
+               char tmp = sbus_readb(src);
+               *d++ = tmp;
+               src++;
+       }
+}
+
+#define sbus_memcpy_fromio(d, s, sz)   _sbus_memcpy_fromio(d, s, sz)
+
 static inline void
 _memcpy_fromio(void *dst, const volatile void __iomem *src, __kernel_size_t n)
 {
@@ -221,6 +236,22 @@ _memcpy_fromio(void *dst, const volatile void __iomem *src, __kernel_size_t n)
 
 #define memcpy_fromio(d,s,sz)  _memcpy_fromio(d,s,sz)
 
+static inline void
+_sbus_memcpy_toio(volatile void __iomem *dst, const void *src,
+                 __kernel_size_t n)
+{
+       const char *s = src;
+       volatile void __iomem *d = dst;
+
+       while (n--) {
+               char tmp = *s++;
+               sbus_writeb(tmp, d);
+               d++;
+       }
+}
+
+#define sbus_memcpy_toio(d, s, sz)     _sbus_memcpy_toio(d, s, sz)
+
 static inline void
 _memcpy_toio(volatile void __iomem *dst, const void *src, __kernel_size_t n)
 {
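
The new sbus_memcpy_fromio()/sbus_memcpy_toio() helpers follow the same byte-at-a-time shape as the _memcpy_fromio()/_memcpy_toio() helpers next to them, but route every byte through sbus_readb()/sbus_writeb() so the SBUS accessor semantics apply to each transfer instead of a wide or compiler-merged copy. The same shape in plain, standalone C, with a volatile byte region standing in for the device window (names here are illustrative, not kernel API):

    #include <stddef.h>
    #include <stdio.h>

    /* Stand-in for a device window; volatile forces one real access per byte. */
    static volatile unsigned char fake_window[16] = "sbus device data";

    static unsigned char byte_read(const volatile unsigned char *p)
    {
            return *p;                      /* sbus_readb() would go here */
    }

    static void copy_from_window(void *dst, const volatile unsigned char *src,
                                 size_t n)
    {
            unsigned char *d = dst;

            while (n--)
                    *d++ = byte_read(src++);
    }

    int main(void)
    {
            char buf[16];

            copy_from_window(buf, fake_window, sizeof(buf));
            printf("%.16s\n", buf);
            return 0;
    }
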
index 9517d06..9c89654 100644 (file)
@@ -418,6 +418,21 @@ _memset_io(volatile void __iomem *dst, int c, __kernel_size_t n)
 
 #define memset_io(d,c,sz)      _memset_io(d,c,sz)
 
+static inline void
+_sbus_memcpy_fromio(void *dst, const volatile void __iomem *src,
+                   __kernel_size_t n)
+{
+       char *d = dst;
+
+       while (n--) {
+               char tmp = sbus_readb(src);
+               *d++ = tmp;
+               src++;
+       }
+}
+
+#define sbus_memcpy_fromio(d, s, sz)   _sbus_memcpy_fromio(d, s, sz)
+
 static inline void
 _memcpy_fromio(void *dst, const volatile void __iomem *src, __kernel_size_t n)
 {
@@ -432,6 +447,22 @@ _memcpy_fromio(void *dst, const volatile void __iomem *src, __kernel_size_t n)
 
 #define memcpy_fromio(d,s,sz)  _memcpy_fromio(d,s,sz)
 
+static inline void
+_sbus_memcpy_toio(volatile void __iomem *dst, const void *src,
+                 __kernel_size_t n)
+{
+       const char *s = src;
+       volatile void __iomem *d = dst;
+
+       while (n--) {
+               char tmp = *s++;
+               sbus_writeb(tmp, d);
+               d++;
+       }
+}
+
+#define sbus_memcpy_toio(d, s, sz)     _sbus_memcpy_toio(d, s, sz)
+
 static inline void
 _memcpy_toio(volatile void __iomem *dst, const void *src, __kernel_size_t n)
 {
index 5312782..948b686 100644 (file)
@@ -38,7 +38,7 @@ static inline void pcibios_penalize_isa_irq(int irq, int active)
  * types on sparc64.  However, it requires that the device
  * can drive enough of the 64 bits.
  */
-#define PCI64_REQUIRED_MASK    (~(dma64_addr_t)0)
+#define PCI64_REQUIRED_MASK    (~(u64)0)
 #define PCI64_ADDR_BASE                0xfffc000000000000UL
 
 #ifdef CONFIG_PCI
index e608f39..27b9e93 100644 (file)
@@ -323,18 +323,35 @@ const struct user_regset_view *task_user_regset_view(struct task_struct *task)
        return &user_sparc32_view;
 }
 
-long arch_ptrace(struct task_struct *child, long request, long addr, long data)
+struct fps {
+       unsigned long regs[32];
+       unsigned long fsr;
+       unsigned long flags;
+       unsigned long extra;
+       unsigned long fpqd;
+       struct fq {
+               unsigned long *insnaddr;
+               unsigned long insn;
+       } fpq[16];
+};
+
+long arch_ptrace(struct task_struct *child, long request,
+                unsigned long addr, unsigned long data)
 {
        unsigned long addr2 = current->thread.kregs->u_regs[UREG_I4];
+       void __user *addr2p;
        const struct user_regset_view *view;
+       struct pt_regs __user *pregs;
+       struct fps __user *fps;
        int ret;
 
        view = task_user_regset_view(current);
+       addr2p = (void __user *) addr2;
+       pregs = (struct pt_regs __user *) addr;
+       fps = (struct fps __user *) addr;
 
        switch(request) {
        case PTRACE_GETREGS: {
-               struct pt_regs __user *pregs = (struct pt_regs __user *) addr;
-
                ret = copy_regset_to_user(child, view, REGSET_GENERAL,
                                          32 * sizeof(u32),
                                          4 * sizeof(u32),
@@ -348,8 +365,6 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data)
        }
 
        case PTRACE_SETREGS: {
-               struct pt_regs __user *pregs = (struct pt_regs __user *) addr;
-
                ret = copy_regset_from_user(child, view, REGSET_GENERAL,
                                            32 * sizeof(u32),
                                            4 * sizeof(u32),
@@ -363,19 +378,6 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data)
        }
 
        case PTRACE_GETFPREGS: {
-               struct fps {
-                       unsigned long regs[32];
-                       unsigned long fsr;
-                       unsigned long flags;
-                       unsigned long extra;
-                       unsigned long fpqd;
-                       struct fq {
-                               unsigned long *insnaddr;
-                               unsigned long insn;
-                       } fpq[16];
-               };
-               struct fps __user *fps = (struct fps __user *) addr;
-
                ret = copy_regset_to_user(child, view, REGSET_FP,
                                          0 * sizeof(u32),
                                          32 * sizeof(u32),
@@ -397,19 +399,6 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data)
        }
 
        case PTRACE_SETFPREGS: {
-               struct fps {
-                       unsigned long regs[32];
-                       unsigned long fsr;
-                       unsigned long flags;
-                       unsigned long extra;
-                       unsigned long fpqd;
-                       struct fq {
-                               unsigned long *insnaddr;
-                               unsigned long insn;
-                       } fpq[16];
-               };
-               struct fps __user *fps = (struct fps __user *) addr;
-
                ret = copy_regset_from_user(child, view, REGSET_FP,
                                            0 * sizeof(u32),
                                            32 * sizeof(u32),
@@ -424,8 +413,7 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data)
 
        case PTRACE_READTEXT:
        case PTRACE_READDATA:
-               ret = ptrace_readdata(child, addr,
-                                     (void __user *) addr2, data);
+               ret = ptrace_readdata(child, addr, addr2p, data);
 
                if (ret == data)
                        ret = 0;
@@ -435,8 +423,7 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data)
 
        case PTRACE_WRITETEXT:
        case PTRACE_WRITEDATA:
-               ret = ptrace_writedata(child, (void __user *) addr2,
-                                      addr, data);
+               ret = ptrace_writedata(child, addr2p, addr, data);
 
                if (ret == data)
                        ret = 0;
index aa90da0..9ccc812 100644 (file)
@@ -969,16 +969,19 @@ struct fps {
        unsigned long fsr;
 };
 
-long arch_ptrace(struct task_struct *child, long request, long addr, long data)
+long arch_ptrace(struct task_struct *child, long request,
+                unsigned long addr, unsigned long data)
 {
        const struct user_regset_view *view = task_user_regset_view(current);
        unsigned long addr2 = task_pt_regs(current)->u_regs[UREG_I4];
        struct pt_regs __user *pregs;
        struct fps __user *fps;
+       void __user *addr2p;
        int ret;
 
-       pregs = (struct pt_regs __user *) (unsigned long) addr;
-       fps = (struct fps __user *) (unsigned long) addr;
+       pregs = (struct pt_regs __user *) addr;
+       fps = (struct fps __user *) addr;
+       addr2p = (void __user *) addr2;
 
        switch (request) {
        case PTRACE_PEEKUSR:
@@ -1029,8 +1032,7 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data)
 
        case PTRACE_READTEXT:
        case PTRACE_READDATA:
-               ret = ptrace_readdata(child, addr,
-                                     (char __user *)addr2, data);
+               ret = ptrace_readdata(child, addr, addr2p, data);
                if (ret == data)
                        ret = 0;
                else if (ret >= 0)
@@ -1039,8 +1041,7 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data)
 
        case PTRACE_WRITETEXT:
        case PTRACE_WRITEDATA:
-               ret = ptrace_writedata(child, (char __user *) addr2,
-                                      addr, data);
+               ret = ptrace_writedata(child, addr2p, addr, data);
                if (ret == data)
                        ret = 0;
                else if (ret >= 0)
index 5e50c09..4730eac 100644 (file)
@@ -75,7 +75,7 @@ void __kunmap_atomic(void *kvaddr)
                return;
        }
 
-       type = kmap_atomic_idx_pop();
+       type = kmap_atomic_idx();
 
 #ifdef CONFIG_DEBUG_HIGHMEM
        {
@@ -104,6 +104,8 @@ void __kunmap_atomic(void *kvaddr)
 #endif
        }
 #endif
+
+       kmap_atomic_idx_pop();
        pagefault_enable();
 }
 EXPORT_SYMBOL(__kunmap_atomic);
index 89cfee0..7e8c284 100644 (file)
@@ -58,6 +58,9 @@ config ARCH_SUPPORTS_OPTIMIZED_INLINING
 config ARCH_PHYS_ADDR_T_64BIT
        def_bool y
 
+config ARCH_DMA_ADDR_T_64BIT
+       def_bool y
+
 config LOCKDEP_SUPPORT
        def_bool y
 
index 5b20c28..9cd2988 100644 (file)
@@ -45,7 +45,8 @@ void ptrace_disable(struct task_struct *child)
        clear_tsk_thread_flag(child, TIF_SYSCALL_TRACE);
 }
 
-long arch_ptrace(struct task_struct *child, long request, long addr, long data)
+long arch_ptrace(struct task_struct *child, long request,
+                unsigned long addr, unsigned long data)
 {
        unsigned long __user *datap = (long __user __force *)data;
        unsigned long tmp;
@@ -57,7 +58,7 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data)
        switch (request) {
 
        case PTRACE_PEEKUSR:  /* Read register from pt_regs. */
-               if (addr < 0 || addr >= PTREGS_SIZE)
+               if (addr >= PTREGS_SIZE)
                        break;
                childreg = (char *)task_pt_regs(child) + addr;
 #ifdef CONFIG_COMPAT
@@ -76,7 +77,7 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data)
                break;
 
        case PTRACE_POKEUSR:  /* Write register in pt_regs. */
-               if (addr < 0 || addr >= PTREGS_SIZE)
+               if (addr >= PTREGS_SIZE)
                        break;
                childreg = (char *)task_pt_regs(child) + addr;
 #ifdef CONFIG_COMPAT
@@ -98,7 +99,8 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data)
                if (!access_ok(VERIFY_WRITE, datap, PTREGS_SIZE))
                        break;
                childregs = (long *)task_pt_regs(child);
-               for (i = 0; i < sizeof(struct pt_regs)/sizeof(long); ++i) {
+               for (i = 0; i < sizeof(struct pt_regs)/sizeof(unsigned long);
+                               ++i) {
                        ret = __put_user(childregs[i], &datap[i]);
                        if (ret != 0)
                                break;
@@ -109,7 +111,8 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data)
                if (!access_ok(VERIFY_READ, datap, PTREGS_SIZE))
                        break;
                childregs = (long *)task_pt_regs(child);
-               for (i = 0; i < sizeof(struct pt_regs)/sizeof(long); ++i) {
+               for (i = 0; i < sizeof(struct pt_regs)/sizeof(unsigned long);
+                               ++i) {
                        ret = __get_user(childregs[i], &datap[i]);
                        if (ret != 0)
                                break;
index f3a50e7..ae51cad 100644 (file)
@@ -30,8 +30,6 @@
 #include <linux/timex.h>
 #include <asm/setup.h>
 #include <asm/sections.h>
-#include <asm/sections.h>
-#include <asm/cacheflush.h>
 #include <asm/cacheflush.h>
 #include <asm/pgalloc.h>
 #include <asm/mmu_context.h>
index 8ef6595..abb5733 100644 (file)
@@ -241,7 +241,7 @@ void __kunmap_atomic(void *kvaddr)
                pte_t pteval = *pte;
                int idx, type;
 
-               type = kmap_atomic_idx_pop();
+               type = kmap_atomic_idx();
                idx = type + KM_TYPE_NR*smp_processor_id();
 
                /*
@@ -252,6 +252,7 @@ void __kunmap_atomic(void *kvaddr)
                BUG_ON(!pte_present(pteval) && !pte_migrating(pteval));
                kmap_atomic_unregister(pte_page(pteval), vaddr);
                kpte_clear_flush(pte, vaddr);
+               kmap_atomic_idx_pop();
        } else {
                /* Must be a lowmem page */
                BUG_ON(vaddr < PAGE_OFFSET);
index e051049..a5e33f2 100644 (file)
@@ -42,10 +42,12 @@ void ptrace_disable(struct task_struct *child)
 extern int peek_user(struct task_struct * child, long addr, long data);
 extern int poke_user(struct task_struct * child, long addr, long data);
 
-long arch_ptrace(struct task_struct *child, long request, long addr, long data)
+long arch_ptrace(struct task_struct *child, long request,
+                unsigned long addr, unsigned long data)
 {
        int i, ret;
-       unsigned long __user *p = (void __user *)(unsigned long)data;
+       unsigned long __user *p = (void __user *)data;
+       void __user *vp = p;
 
        switch (request) {
        /* read word at location addr. */
@@ -107,24 +109,20 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data)
 #endif
 #ifdef PTRACE_GETFPREGS
        case PTRACE_GETFPREGS: /* Get the child FPU state. */
-               ret = get_fpregs((struct user_i387_struct __user *) data,
-                                child);
+               ret = get_fpregs(vp, child);
                break;
 #endif
 #ifdef PTRACE_SETFPREGS
        case PTRACE_SETFPREGS: /* Set the child FPU state. */
-               ret = set_fpregs((struct user_i387_struct __user *) data,
-                                child);
+               ret = set_fpregs(vp, child);
                break;
 #endif
        case PTRACE_GET_THREAD_AREA:
-               ret = ptrace_get_thread_area(child, addr,
-                                            (struct user_desc __user *) data);
+               ret = ptrace_get_thread_area(child, addr, vp);
                break;
 
        case PTRACE_SET_THREAD_AREA:
-               ret = ptrace_set_thread_area(child, addr,
-                                            (struct user_desc __user *) data);
+               ret = ptrace_set_thread_area(child, addr, vp);
                break;
 
        case PTRACE_FAULTINFO: {
@@ -134,7 +132,8 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data)
                 * On i386, ptrace_faultinfo is smaller!
                 */
                ret = copy_to_user(p, &child->thread.arch.faultinfo,
-                                  sizeof(struct ptrace_faultinfo));
+                                  sizeof(struct ptrace_faultinfo)) ?
+                       -EIO : 0;
                break;
        }
 
@@ -158,7 +157,7 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data)
 #ifdef PTRACE_ARCH_PRCTL
        case PTRACE_ARCH_PRCTL:
                /* XXX Calls ptrace on the host - needs some SMP thinking */
-               ret = arch_prctl(child, data, (void *) addr);
+               ret = arch_prctl(child, data, (void __user *) addr);
                break;
 #endif
        default:
index c9b1765..d23b2d3 100644 (file)
@@ -203,8 +203,8 @@ int set_fpxregs(struct user_fxsr_struct __user *buf, struct task_struct *child)
                                     (unsigned long *) &fpregs);
 }
 
-long subarch_ptrace(struct task_struct *child, long request, long addr,
-                   long data)
+long subarch_ptrace(struct task_struct *child, long request,
+                   unsigned long addr, unsigned long data)
 {
        return -EIO;
 }
index f3458d7..f436136 100644 (file)
@@ -175,19 +175,18 @@ int set_fpregs(struct user_i387_struct __user *buf, struct task_struct *child)
        return restore_fp_registers(userspace_pid[cpu], fpregs);
 }
 
-long subarch_ptrace(struct task_struct *child, long request, long addr,
-                   long data)
+long subarch_ptrace(struct task_struct *child, long request,
+                   unsigned long addr, unsigned long data)
 {
        int ret = -EIO;
+       void __user *datap = (void __user *) data;
 
        switch (request) {
        case PTRACE_GETFPXREGS: /* Get the child FPU state. */
-               ret = get_fpregs((struct user_i387_struct __user *) data,
-                                child);
+               ret = get_fpregs(datap, child);
                break;
        case PTRACE_SETFPXREGS: /* Set the child FPU state. */
-               ret = set_fpregs((struct user_i387_struct __user *) data,
-                                child);
+               ret = set_fpregs(datap, child);
                break;
        }
 
index dfabfef..299fbc8 100644 (file)
@@ -347,6 +347,7 @@ endif
 
 config X86_VSMP
        bool "ScaleMP vSMP"
+       select PARAVIRT_GUEST
        select PARAVIRT
        depends on X86_64 && PCI
        depends on X86_EXTENDED_PLATFORM
index 0bf5b00..13b0eba 100644 (file)
@@ -21,10 +21,8 @@ static inline int irq_canonicalize(int irq)
 
 #ifdef CONFIG_X86_32
 extern void irq_ctx_init(int cpu);
-extern void irq_ctx_exit(int cpu);
 #else
 # define irq_ctx_init(cpu) do { } while (0)
-# define irq_ctx_exit(cpu) do { } while (0)
 #endif
 
 #define __ARCH_HAS_DO_SOFTIRQ
index 83c4bb1..3ea3dc4 100644 (file)
 #define MSR_AMD64_IBSDCLINAD           0xc0011038
 #define MSR_AMD64_IBSDCPHYSAD          0xc0011039
 #define MSR_AMD64_IBSCTL               0xc001103a
+#define MSR_AMD64_IBSBRTARGET          0xc001103b
 
 /* Fam 10h MSRs */
 #define MSR_FAM10H_MMIO_CONF_BASE      0xc0010058
index 6e742cc..550e26b 100644 (file)
@@ -111,17 +111,18 @@ union cpuid10_edx {
 #define X86_PMC_IDX_FIXED_BTS                          (X86_PMC_IDX_FIXED + 16)
 
 /* IbsFetchCtl bits/masks */
-#define IBS_FETCH_RAND_EN              (1ULL<<57)
-#define IBS_FETCH_VAL                  (1ULL<<49)
-#define IBS_FETCH_ENABLE               (1ULL<<48)
-#define IBS_FETCH_CNT                  0xFFFF0000ULL
-#define IBS_FETCH_MAX_CNT              0x0000FFFFULL
+#define IBS_FETCH_RAND_EN      (1ULL<<57)
+#define IBS_FETCH_VAL          (1ULL<<49)
+#define IBS_FETCH_ENABLE       (1ULL<<48)
+#define IBS_FETCH_CNT          0xFFFF0000ULL
+#define IBS_FETCH_MAX_CNT      0x0000FFFFULL
 
 /* IbsOpCtl bits */
-#define IBS_OP_CNT_CTL                 (1ULL<<19)
-#define IBS_OP_VAL                     (1ULL<<18)
-#define IBS_OP_ENABLE                  (1ULL<<17)
-#define IBS_OP_MAX_CNT                 0x0000FFFFULL
+#define IBS_OP_CNT_CTL         (1ULL<<19)
+#define IBS_OP_VAL             (1ULL<<18)
+#define IBS_OP_ENABLE          (1ULL<<17)
+#define IBS_OP_MAX_CNT         0x0000FFFFULL
+#define IBS_OP_MAX_CNT_EXT     0x007FFFFFULL   /* not a register bit mask */
 
 #ifdef CONFIG_PERF_EVENTS
 extern void init_hw_perf_events(void);
index 4cfc908..4c2f63c 100644 (file)
@@ -50,7 +50,7 @@ struct smp_ops {
        void (*smp_prepare_cpus)(unsigned max_cpus);
        void (*smp_cpus_done)(unsigned max_cpus);
 
-       void (*smp_send_stop)(void);
+       void (*stop_other_cpus)(int wait);
        void (*smp_send_reschedule)(int cpu);
 
        int (*cpu_up)(unsigned cpu);
@@ -73,7 +73,12 @@ extern struct smp_ops smp_ops;
 
 static inline void smp_send_stop(void)
 {
-       smp_ops.smp_send_stop();
+       smp_ops.stop_other_cpus(0);
+}
+
+static inline void stop_other_cpus(void)
+{
+       smp_ops.stop_other_cpus(1);
 }
 
 static inline void smp_prepare_boot_cpu(void)
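
A short sketch of how the two inline wrappers above are meant to be used now that the op takes a wait argument (illustrative callers, not from the patch):

    /* Illustrative only: shutdown/kexec paths want the other CPUs genuinely
     * stopped before continuing; emergency paths settle for best effort. */
    static void example_shutdown(void)
    {
            stop_other_cpus();      /* smp_ops.stop_other_cpus(1): wait for completion */
    }

    static void example_emergency_stop(void)
    {
            smp_send_stop();        /* smp_ops.stop_other_cpus(0): best effort, bounded wait */
    }
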
index c1e8c7a..ed63101 100644 (file)
@@ -237,6 +237,7 @@ struct x86_pmu {
         * Intel DebugStore bits
         */
        int             bts, pebs;
+       int             bts_active, pebs_active;
        int             pebs_record_size;
        void            (*drain_pebs)(struct pt_regs *regs);
        struct event_constraint *pebs_constraints;
@@ -380,7 +381,7 @@ static void release_pmc_hardware(void) {}
 
 #endif
 
-static int reserve_ds_buffers(void);
+static void reserve_ds_buffers(void);
 static void release_ds_buffers(void);
 
 static void hw_perf_event_destroy(struct perf_event *event)
@@ -477,7 +478,7 @@ static int x86_setup_perfctr(struct perf_event *event)
        if ((attr->config == PERF_COUNT_HW_BRANCH_INSTRUCTIONS) &&
            (hwc->sample_period == 1)) {
                /* BTS is not supported by this architecture. */
-               if (!x86_pmu.bts)
+               if (!x86_pmu.bts_active)
                        return -EOPNOTSUPP;
 
                /* BTS is currently only allowed for user-mode. */
@@ -496,12 +497,13 @@ static int x86_pmu_hw_config(struct perf_event *event)
                int precise = 0;
 
                /* Support for constant skid */
-               if (x86_pmu.pebs)
+               if (x86_pmu.pebs_active) {
                        precise++;
 
-               /* Support for IP fixup */
-               if (x86_pmu.lbr_nr)
-                       precise++;
+                       /* Support for IP fixup */
+                       if (x86_pmu.lbr_nr)
+                               precise++;
+               }
 
                if (event->attr.precise_ip > precise)
                        return -EOPNOTSUPP;
@@ -543,11 +545,8 @@ static int __x86_pmu_event_init(struct perf_event *event)
                if (atomic_read(&active_events) == 0) {
                        if (!reserve_pmc_hardware())
                                err = -EBUSY;
-                       else {
-                               err = reserve_ds_buffers();
-                               if (err)
-                                       release_pmc_hardware();
-                       }
+                       else
+                               reserve_ds_buffers();
                }
                if (!err)
                        atomic_inc(&active_events);
index 4977f9c..b7dcd9f 100644 (file)
@@ -74,6 +74,107 @@ static void fini_debug_store_on_cpu(int cpu)
        wrmsr_on_cpu(cpu, MSR_IA32_DS_AREA, 0, 0);
 }
 
+static int alloc_pebs_buffer(int cpu)
+{
+       struct debug_store *ds = per_cpu(cpu_hw_events, cpu).ds;
+       int node = cpu_to_node(cpu);
+       int max, thresh = 1; /* always use a single PEBS record */
+       void *buffer;
+
+       if (!x86_pmu.pebs)
+               return 0;
+
+       buffer = kmalloc_node(PEBS_BUFFER_SIZE, GFP_KERNEL | __GFP_ZERO, node);
+       if (unlikely(!buffer))
+               return -ENOMEM;
+
+       max = PEBS_BUFFER_SIZE / x86_pmu.pebs_record_size;
+
+       ds->pebs_buffer_base = (u64)(unsigned long)buffer;
+       ds->pebs_index = ds->pebs_buffer_base;
+       ds->pebs_absolute_maximum = ds->pebs_buffer_base +
+               max * x86_pmu.pebs_record_size;
+
+       ds->pebs_interrupt_threshold = ds->pebs_buffer_base +
+               thresh * x86_pmu.pebs_record_size;
+
+       return 0;
+}
+
+static void release_pebs_buffer(int cpu)
+{
+       struct debug_store *ds = per_cpu(cpu_hw_events, cpu).ds;
+
+       if (!ds || !x86_pmu.pebs)
+               return;
+
+       kfree((void *)(unsigned long)ds->pebs_buffer_base);
+       ds->pebs_buffer_base = 0;
+}
+
+static int alloc_bts_buffer(int cpu)
+{
+       struct debug_store *ds = per_cpu(cpu_hw_events, cpu).ds;
+       int node = cpu_to_node(cpu);
+       int max, thresh;
+       void *buffer;
+
+       if (!x86_pmu.bts)
+               return 0;
+
+       buffer = kmalloc_node(BTS_BUFFER_SIZE, GFP_KERNEL | __GFP_ZERO, node);
+       if (unlikely(!buffer))
+               return -ENOMEM;
+
+       max = BTS_BUFFER_SIZE / BTS_RECORD_SIZE;
+       thresh = max / 16;
+
+       ds->bts_buffer_base = (u64)(unsigned long)buffer;
+       ds->bts_index = ds->bts_buffer_base;
+       ds->bts_absolute_maximum = ds->bts_buffer_base +
+               max * BTS_RECORD_SIZE;
+       ds->bts_interrupt_threshold = ds->bts_absolute_maximum -
+               thresh * BTS_RECORD_SIZE;
+
+       return 0;
+}
+
+static void release_bts_buffer(int cpu)
+{
+       struct debug_store *ds = per_cpu(cpu_hw_events, cpu).ds;
+
+       if (!ds || !x86_pmu.bts)
+               return;
+
+       kfree((void *)(unsigned long)ds->bts_buffer_base);
+       ds->bts_buffer_base = 0;
+}
+
+static int alloc_ds_buffer(int cpu)
+{
+       int node = cpu_to_node(cpu);
+       struct debug_store *ds;
+
+       ds = kmalloc_node(sizeof(*ds), GFP_KERNEL | __GFP_ZERO, node);
+       if (unlikely(!ds))
+               return -ENOMEM;
+
+       per_cpu(cpu_hw_events, cpu).ds = ds;
+
+       return 0;
+}
+
+static void release_ds_buffer(int cpu)
+{
+       struct debug_store *ds = per_cpu(cpu_hw_events, cpu).ds;
+
+       if (!ds)
+               return;
+
+       per_cpu(cpu_hw_events, cpu).ds = NULL;
+       kfree(ds);
+}
+
 static void release_ds_buffers(void)
 {
        int cpu;
@@ -82,93 +183,77 @@ static void release_ds_buffers(void)
                return;
 
        get_online_cpus();
-
        for_each_online_cpu(cpu)
                fini_debug_store_on_cpu(cpu);
 
        for_each_possible_cpu(cpu) {
-               struct debug_store *ds = per_cpu(cpu_hw_events, cpu).ds;
-
-               if (!ds)
-                       continue;
-
-               per_cpu(cpu_hw_events, cpu).ds = NULL;
-
-               kfree((void *)(unsigned long)ds->pebs_buffer_base);
-               kfree((void *)(unsigned long)ds->bts_buffer_base);
-               kfree(ds);
+               release_pebs_buffer(cpu);
+               release_bts_buffer(cpu);
+               release_ds_buffer(cpu);
        }
-
        put_online_cpus();
 }
 
-static int reserve_ds_buffers(void)
+static void reserve_ds_buffers(void)
 {
-       int cpu, err = 0;
+       int bts_err = 0, pebs_err = 0;
+       int cpu;
+
+       x86_pmu.bts_active = 0;
+       x86_pmu.pebs_active = 0;
 
        if (!x86_pmu.bts && !x86_pmu.pebs)
-               return 0;
+               return;
+
+       if (!x86_pmu.bts)
+               bts_err = 1;
+
+       if (!x86_pmu.pebs)
+               pebs_err = 1;
 
        get_online_cpus();
 
        for_each_possible_cpu(cpu) {
-               struct debug_store *ds;
-               void *buffer;
-               int max, thresh;
+               if (alloc_ds_buffer(cpu)) {
+                       bts_err = 1;
+                       pebs_err = 1;
+               }
+
+               if (!bts_err && alloc_bts_buffer(cpu))
+                       bts_err = 1;
 
-               err = -ENOMEM;
-               ds = kzalloc(sizeof(*ds), GFP_KERNEL);
-               if (unlikely(!ds))
+               if (!pebs_err && alloc_pebs_buffer(cpu))
+                       pebs_err = 1;
+
+               if (bts_err && pebs_err)
                        break;
-               per_cpu(cpu_hw_events, cpu).ds = ds;
-
-               if (x86_pmu.bts) {
-                       buffer = kzalloc(BTS_BUFFER_SIZE, GFP_KERNEL);
-                       if (unlikely(!buffer))
-                               break;
-
-                       max = BTS_BUFFER_SIZE / BTS_RECORD_SIZE;
-                       thresh = max / 16;
-
-                       ds->bts_buffer_base = (u64)(unsigned long)buffer;
-                       ds->bts_index = ds->bts_buffer_base;
-                       ds->bts_absolute_maximum = ds->bts_buffer_base +
-                               max * BTS_RECORD_SIZE;
-                       ds->bts_interrupt_threshold = ds->bts_absolute_maximum -
-                               thresh * BTS_RECORD_SIZE;
-               }
+       }
 
-               if (x86_pmu.pebs) {
-                       buffer = kzalloc(PEBS_BUFFER_SIZE, GFP_KERNEL);
-                       if (unlikely(!buffer))
-                               break;
-
-                       max = PEBS_BUFFER_SIZE / x86_pmu.pebs_record_size;
-
-                       ds->pebs_buffer_base = (u64)(unsigned long)buffer;
-                       ds->pebs_index = ds->pebs_buffer_base;
-                       ds->pebs_absolute_maximum = ds->pebs_buffer_base +
-                               max * x86_pmu.pebs_record_size;
-                       /*
-                        * Always use single record PEBS
-                        */
-                       ds->pebs_interrupt_threshold = ds->pebs_buffer_base +
-                               x86_pmu.pebs_record_size;
-               }
+       if (bts_err) {
+               for_each_possible_cpu(cpu)
+                       release_bts_buffer(cpu);
+       }
 
-               err = 0;
+       if (pebs_err) {
+               for_each_possible_cpu(cpu)
+                       release_pebs_buffer(cpu);
        }
 
-       if (err)
-               release_ds_buffers();
-       else {
+       if (bts_err && pebs_err) {
+               for_each_possible_cpu(cpu)
+                       release_ds_buffer(cpu);
+       } else {
+               if (x86_pmu.bts && !bts_err)
+                       x86_pmu.bts_active = 1;
+
+               if (x86_pmu.pebs && !pebs_err)
+                       x86_pmu.pebs_active = 1;
+
                for_each_online_cpu(cpu)
                        init_debug_store_on_cpu(cpu);
        }
 
        put_online_cpus();
-
-       return err;
 }
 
 /*
@@ -233,7 +318,7 @@ static int intel_pmu_drain_bts_buffer(void)
        if (!event)
                return 0;
 
-       if (!ds)
+       if (!x86_pmu.bts_active)
                return 0;
 
        at  = (struct bts_record *)(unsigned long)ds->bts_buffer_base;
@@ -503,7 +588,7 @@ static void intel_pmu_drain_pebs_core(struct pt_regs *iregs)
        struct pebs_record_core *at, *top;
        int n;
 
-       if (!ds || !x86_pmu.pebs)
+       if (!x86_pmu.pebs_active)
                return;
 
        at  = (struct pebs_record_core *)(unsigned long)ds->pebs_buffer_base;
@@ -545,7 +630,7 @@ static void intel_pmu_drain_pebs_nhm(struct pt_regs *iregs)
        u64 status = 0;
        int bit, n;
 
-       if (!ds || !x86_pmu.pebs)
+       if (!x86_pmu.pebs_active)
                return;
 
        at  = (struct pebs_record_nhm *)(unsigned long)ds->pebs_buffer_base;
@@ -630,9 +715,8 @@ static void intel_ds_init(void)
 
 #else /* CONFIG_CPU_SUP_INTEL */
 
-static int reserve_ds_buffers(void)
+static void reserve_ds_buffers(void)
 {
-       return 0;
 }
 
 static void release_ds_buffers(void)
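
With reserve_ds_buffers() now returning void, callers consult the new bts_active/pebs_active flags instead of an error code; a hedged sketch of the resulting calling convention, mirroring the __x86_pmu_event_init change earlier in this merge:

    /* Sketch only: DS buffer allocation failures no longer fail event
     * creation, they just leave the corresponding feature inactive. */
    static int example_reserve(void)
    {
            if (!reserve_pmc_hardware())
                    return -EBUSY;

            reserve_ds_buffers();   /* void: sets x86_pmu.bts_active/pebs_active */

            if (!x86_pmu.bts_active)
                    pr_debug("BTS buffers unavailable, BTS stays disabled\n");
            return 0;
    }
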
index 0f6376f..1bc7f75 100644 (file)
@@ -82,11 +82,11 @@ show_stack_log_lvl(struct task_struct *task, struct pt_regs *regs,
                if (kstack_end(stack))
                        break;
                if (i && ((i % STACKSLOTS_PER_LINE) == 0))
-                       printk("\n%s", log_lvl);
-               printk(" %08lx", *stack++);
+                       printk(KERN_CONT "\n");
+               printk(KERN_CONT " %08lx", *stack++);
                touch_nmi_watchdog();
        }
-       printk("\n");
+       printk(KERN_CONT "\n");
        show_trace_log_lvl(task, regs, sp, bp, log_lvl);
 }
 
index 57a21f1..6a34048 100644 (file)
@@ -265,20 +265,20 @@ show_stack_log_lvl(struct task_struct *task, struct pt_regs *regs,
                if (stack >= irq_stack && stack <= irq_stack_end) {
                        if (stack == irq_stack_end) {
                                stack = (unsigned long *) (irq_stack_end[-1]);
-                               printk(" <EOI> ");
+                               printk(KERN_CONT " <EOI> ");
                        }
                } else {
                if (((long) stack & (THREAD_SIZE-1)) == 0)
                        break;
                }
                if (i && ((i % STACKSLOTS_PER_LINE) == 0))
-                       printk("\n%s", log_lvl);
-               printk(" %016lx", *stack++);
+                       printk(KERN_CONT "\n");
+               printk(KERN_CONT " %016lx", *stack++);
                touch_nmi_watchdog();
        }
        preempt_enable();
 
-       printk("\n");
+       printk(KERN_CONT "\n");
        show_trace_log_lvl(task, regs, sp, bp, log_lvl);
 }
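
The printk changes in both dumpstack files rely on KERN_CONT so the hex words continue the line whose prefix was printed earlier; a minimal usage sketch:

    /* Minimal sketch: only the first fragment carries a log level, the
     * KERN_CONT pieces are appended to it instead of starting new lines. */
    static void example_dump_words(const unsigned long *stack, int n)
    {
            int i;

            printk(KERN_INFO "stack:");
            for (i = 0; i < n; i++)
                    printk(KERN_CONT " %08lx", stack[i]);
            printk(KERN_CONT "\n");
    }
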
 
index 50fbbe6..64668db 100644 (file)
@@ -60,9 +60,6 @@ union irq_ctx {
 static DEFINE_PER_CPU(union irq_ctx *, hardirq_ctx);
 static DEFINE_PER_CPU(union irq_ctx *, softirq_ctx);
 
-static DEFINE_PER_CPU_MULTIPAGE_ALIGNED(union irq_ctx, hardirq_stack, THREAD_SIZE);
-static DEFINE_PER_CPU_MULTIPAGE_ALIGNED(union irq_ctx, softirq_stack, THREAD_SIZE);
-
 static void call_on_stack(void *func, void *stack)
 {
        asm volatile("xchgl     %%ebx,%%esp     \n"
@@ -128,7 +125,7 @@ void __cpuinit irq_ctx_init(int cpu)
        if (per_cpu(hardirq_ctx, cpu))
                return;
 
-       irqctx = &per_cpu(hardirq_stack, cpu);
+       irqctx = (union irq_ctx *)__get_free_pages(THREAD_FLAGS, THREAD_ORDER);
        irqctx->tinfo.task              = NULL;
        irqctx->tinfo.exec_domain       = NULL;
        irqctx->tinfo.cpu               = cpu;
@@ -137,7 +134,7 @@ void __cpuinit irq_ctx_init(int cpu)
 
        per_cpu(hardirq_ctx, cpu) = irqctx;
 
-       irqctx = &per_cpu(softirq_stack, cpu);
+       irqctx = (union irq_ctx *)__get_free_pages(THREAD_FLAGS, THREAD_ORDER);
        irqctx->tinfo.task              = NULL;
        irqctx->tinfo.exec_domain       = NULL;
        irqctx->tinfo.cpu               = cpu;
@@ -150,11 +147,6 @@ void __cpuinit irq_ctx_init(int cpu)
               cpu, per_cpu(hardirq_ctx, cpu),  per_cpu(softirq_ctx, cpu));
 }
 
-void irq_ctx_exit(int cpu)
-{
-       per_cpu(hardirq_ctx, cpu) = NULL;
-}
-
 asmlinkage void do_softirq(void)
 {
        unsigned long flags;
index 70c4872..45892dc 100644 (file)
@@ -801,7 +801,8 @@ void ptrace_disable(struct task_struct *child)
 static const struct user_regset_view user_x86_32_view; /* Initialized below. */
 #endif
 
-long arch_ptrace(struct task_struct *child, long request, long addr, long data)
+long arch_ptrace(struct task_struct *child, long request,
+                unsigned long addr, unsigned long data)
 {
        int ret;
        unsigned long __user *datap = (unsigned long __user *)data;
@@ -812,8 +813,7 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data)
                unsigned long tmp;
 
                ret = -EIO;
-               if ((addr & (sizeof(data) - 1)) || addr < 0 ||
-                   addr >= sizeof(struct user))
+               if ((addr & (sizeof(data) - 1)) || addr >= sizeof(struct user))
                        break;
 
                tmp = 0;  /* Default return condition */
@@ -830,8 +830,7 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data)
 
        case PTRACE_POKEUSR: /* write the word at location addr in the USER area */
                ret = -EIO;
-               if ((addr & (sizeof(data) - 1)) || addr < 0 ||
-                   addr >= sizeof(struct user))
+               if ((addr & (sizeof(data) - 1)) || addr >= sizeof(struct user))
                        break;
 
                if (addr < sizeof(struct user_regs_struct))
@@ -888,17 +887,17 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data)
 
 #if defined CONFIG_X86_32 || defined CONFIG_IA32_EMULATION
        case PTRACE_GET_THREAD_AREA:
-               if (addr < 0)
+               if ((int) addr < 0)
                        return -EIO;
                ret = do_get_thread_area(child, addr,
-                                        (struct user_desc __user *) data);
+                                       (struct user_desc __user *)data);
                break;
 
        case PTRACE_SET_THREAD_AREA:
-               if (addr < 0)
+               if ((int) addr < 0)
                        return -EIO;
                ret = do_set_thread_area(child, addr,
-                                        (struct user_desc __user *) data, 0);
+                                       (struct user_desc __user *)data, 0);
                break;
 #endif
 
index f7f53dc..c495aa8 100644 (file)
@@ -635,7 +635,7 @@ void native_machine_shutdown(void)
        /* O.K Now that I'm on the appropriate processor,
         * stop all of the others.
         */
-       smp_send_stop();
+       stop_other_cpus();
 #endif
 
        lapic_shutdown();
index d801210..513deac 100644 (file)
@@ -159,10 +159,10 @@ asmlinkage void smp_reboot_interrupt(void)
        irq_exit();
 }
 
-static void native_smp_send_stop(void)
+static void native_stop_other_cpus(int wait)
 {
        unsigned long flags;
-       unsigned long wait;
+       unsigned long timeout;
 
        if (reboot_force)
                return;
@@ -179,9 +179,12 @@ static void native_smp_send_stop(void)
        if (num_online_cpus() > 1) {
                apic->send_IPI_allbutself(REBOOT_VECTOR);
 
-               /* Don't wait longer than a second */
-               wait = USEC_PER_SEC;
-               while (num_online_cpus() > 1 && wait--)
+               /*
+                * Don't wait longer than a second if the caller
+                * didn't ask us to wait.
+                */
+               timeout = USEC_PER_SEC;
+               while (num_online_cpus() > 1 && (wait || timeout--))
                        udelay(1);
        }
 
@@ -227,7 +230,7 @@ struct smp_ops smp_ops = {
        .smp_prepare_cpus       = native_smp_prepare_cpus,
        .smp_cpus_done          = native_smp_cpus_done,
 
-       .smp_send_stop          = native_smp_send_stop,
+       .stop_other_cpus        = native_stop_other_cpus,
        .smp_send_reschedule    = native_smp_send_reschedule,
 
        .cpu_up                 = native_cpu_up,
index 6c7faec..083e99d 100644 (file)
@@ -1373,7 +1373,6 @@ void play_dead_common(void)
 {
        idle_task_exit();
        reset_lazy_tlbstate();
-       irq_ctx_exit(raw_smp_processor_id());
        c1e_remove_cpu(raw_smp_processor_id());
 
        mb();
index d723e36..b499626 100644 (file)
@@ -74,7 +74,7 @@ void __kunmap_atomic(void *kvaddr)
            vaddr <= __fix_to_virt(FIX_KMAP_BEGIN)) {
                int idx, type;
 
-               type = kmap_atomic_idx_pop();
+               type = kmap_atomic_idx();
                idx = type + KM_TYPE_NR * smp_processor_id();
 
 #ifdef CONFIG_DEBUG_HIGHMEM
@@ -87,6 +87,7 @@ void __kunmap_atomic(void *kvaddr)
                 * attributes or becomes a protected page in a hypervisor.
                 */
                kpte_clear_flush(kmap_pte-idx, vaddr);
+               kmap_atomic_idx_pop();
        }
 #ifdef CONFIG_DEBUG_HIGHMEM
        else {
index 8434620..71a5929 100644 (file)
@@ -51,7 +51,6 @@
 #include <asm/numa.h>
 #include <asm/cacheflush.h>
 #include <asm/init.h>
-#include <linux/bootmem.h>
 
 static int __init parse_direct_gbpages_off(char *arg)
 {
index 75a3d7f..7b179b4 100644 (file)
@@ -98,7 +98,7 @@ iounmap_atomic(void __iomem *kvaddr)
            vaddr <= __fix_to_virt(FIX_KMAP_BEGIN)) {
                int idx, type;
 
-               type = kmap_atomic_idx_pop();
+               type = kmap_atomic_idx();
                idx = type + KM_TYPE_NR * smp_processor_id();
 
 #ifdef CONFIG_DEBUG_HIGHMEM
@@ -111,6 +111,7 @@ iounmap_atomic(void __iomem *kvaddr)
                 * attributes or becomes a protected page in a hypervisor.
                 */
                kpte_clear_flush(kmap_pte-idx, vaddr);
+               kmap_atomic_idx_pop();
        }
 
        pagefault_enable();
index bd1489c..4e8baad 100644 (file)
@@ -726,6 +726,12 @@ int __init op_nmi_init(struct oprofile_operations *ops)
                case 0x11:
                        cpu_type = "x86-64/family11h";
                        break;
+               case 0x12:
+                       cpu_type = "x86-64/family12h";
+                       break;
+               case 0x14:
+                       cpu_type = "x86-64/family14h";
+                       break;
                default:
                        return -ENODEV;
                }
index 42fb46f..a011bcc 100644 (file)
@@ -48,17 +48,24 @@ static unsigned long reset_value[NUM_VIRT_COUNTERS];
 
 static u32 ibs_caps;
 
-struct op_ibs_config {
+struct ibs_config {
        unsigned long op_enabled;
        unsigned long fetch_enabled;
        unsigned long max_cnt_fetch;
        unsigned long max_cnt_op;
        unsigned long rand_en;
        unsigned long dispatched_ops;
+       unsigned long branch_target;
 };
 
-static struct op_ibs_config ibs_config;
-static u64 ibs_op_ctl;
+struct ibs_state {
+       u64             ibs_op_ctl;
+       int             branch_target;
+       unsigned long   sample_size;
+};
+
+static struct ibs_config ibs_config;
+static struct ibs_state ibs_state;
 
 /*
  * IBS cpuid feature detection
@@ -71,8 +78,16 @@ static u64 ibs_op_ctl;
  * bit 0 is used to indicate the existence of IBS.
  */
 #define IBS_CAPS_AVAIL                 (1U<<0)
+#define IBS_CAPS_FETCHSAM              (1U<<1)
+#define IBS_CAPS_OPSAM                 (1U<<2)
 #define IBS_CAPS_RDWROPCNT             (1U<<3)
 #define IBS_CAPS_OPCNT                 (1U<<4)
+#define IBS_CAPS_BRNTRGT               (1U<<5)
+#define IBS_CAPS_OPCNTEXT              (1U<<6)
+
+#define IBS_CAPS_DEFAULT               (IBS_CAPS_AVAIL         \
+                                        | IBS_CAPS_FETCHSAM    \
+                                        | IBS_CAPS_OPSAM)
 
 /*
  * IBS APIC setup
@@ -99,12 +114,12 @@ static u32 get_ibs_caps(void)
        /* check IBS cpuid feature flags */
        max_level = cpuid_eax(0x80000000);
        if (max_level < IBS_CPUID_FEATURES)
-               return IBS_CAPS_AVAIL;
+               return IBS_CAPS_DEFAULT;
 
        ibs_caps = cpuid_eax(IBS_CPUID_FEATURES);
        if (!(ibs_caps & IBS_CAPS_AVAIL))
                /* cpuid flags not valid */
-               return IBS_CAPS_AVAIL;
+               return IBS_CAPS_DEFAULT;
 
        return ibs_caps;
 }
@@ -197,8 +212,8 @@ op_amd_handle_ibs(struct pt_regs * const regs,
                rdmsrl(MSR_AMD64_IBSOPCTL, ctl);
                if (ctl & IBS_OP_VAL) {
                        rdmsrl(MSR_AMD64_IBSOPRIP, val);
-                       oprofile_write_reserve(&entry, regs, val,
-                                              IBS_OP_CODE, IBS_OP_SIZE);
+                       oprofile_write_reserve(&entry, regs, val, IBS_OP_CODE,
+                                              ibs_state.sample_size);
                        oprofile_add_data64(&entry, val);
                        rdmsrl(MSR_AMD64_IBSOPDATA, val);
                        oprofile_add_data64(&entry, val);
@@ -210,10 +225,14 @@ op_amd_handle_ibs(struct pt_regs * const regs,
                        oprofile_add_data64(&entry, val);
                        rdmsrl(MSR_AMD64_IBSDCPHYSAD, val);
                        oprofile_add_data64(&entry, val);
+                       if (ibs_state.branch_target) {
+                               rdmsrl(MSR_AMD64_IBSBRTARGET, val);
+                               oprofile_add_data(&entry, (unsigned long)val);
+                       }
                        oprofile_write_commit(&entry);
 
                        /* reenable the IRQ */
-                       ctl = op_amd_randomize_ibs_op(ibs_op_ctl);
+                       ctl = op_amd_randomize_ibs_op(ibs_state.ibs_op_ctl);
                        wrmsrl(MSR_AMD64_IBSOPCTL, ctl);
                }
        }
@@ -226,21 +245,32 @@ static inline void op_amd_start_ibs(void)
        if (!ibs_caps)
                return;
 
+       memset(&ibs_state, 0, sizeof(ibs_state));
+
+       /*
+        * Note: Since the max count settings may be out of range, we
+        * write back the actual values used so that userland can read
+        * them.
+        */
+
        if (ibs_config.fetch_enabled) {
-               val = (ibs_config.max_cnt_fetch >> 4) & IBS_FETCH_MAX_CNT;
+               val = ibs_config.max_cnt_fetch >> 4;
+               val = min(val, IBS_FETCH_MAX_CNT);
+               ibs_config.max_cnt_fetch = val << 4;
                val |= ibs_config.rand_en ? IBS_FETCH_RAND_EN : 0;
                val |= IBS_FETCH_ENABLE;
                wrmsrl(MSR_AMD64_IBSFETCHCTL, val);
        }
 
        if (ibs_config.op_enabled) {
-               ibs_op_ctl = ibs_config.max_cnt_op >> 4;
+               val = ibs_config.max_cnt_op >> 4;
                if (!(ibs_caps & IBS_CAPS_RDWROPCNT)) {
                        /*
                         * IbsOpCurCnt not supported.  See
                         * op_amd_randomize_ibs_op() for details.
                         */
-                       ibs_op_ctl = clamp(ibs_op_ctl, 0x0081ULL, 0xFF80ULL);
+                       val = clamp(val, 0x0081ULL, 0xFF80ULL);
+                       ibs_config.max_cnt_op = val << 4;
                } else {
                        /*
                         * The start value is randomized with a
@@ -248,13 +278,24 @@ static inline void op_amd_start_ibs(void)
                         * with the half of the randomized range. Also
                         * avoid underflows.
                         */
-                       ibs_op_ctl = min(ibs_op_ctl + IBS_RANDOM_MAXCNT_OFFSET,
-                                        IBS_OP_MAX_CNT);
+                       val += IBS_RANDOM_MAXCNT_OFFSET;
+                       if (ibs_caps & IBS_CAPS_OPCNTEXT)
+                               val = min(val, IBS_OP_MAX_CNT_EXT);
+                       else
+                               val = min(val, IBS_OP_MAX_CNT);
+                       ibs_config.max_cnt_op =
+                               (val - IBS_RANDOM_MAXCNT_OFFSET) << 4;
+               }
+               val = ((val & ~IBS_OP_MAX_CNT) << 4) | (val & IBS_OP_MAX_CNT);
+               val |= ibs_config.dispatched_ops ? IBS_OP_CNT_CTL : 0;
+               val |= IBS_OP_ENABLE;
+               ibs_state.ibs_op_ctl = val;
+               ibs_state.sample_size = IBS_OP_SIZE;
+               if (ibs_config.branch_target) {
+                       ibs_state.branch_target = 1;
+                       ibs_state.sample_size++;
                }
-               if (ibs_caps & IBS_CAPS_OPCNT && ibs_config.dispatched_ops)
-                       ibs_op_ctl |= IBS_OP_CNT_CTL;
-               ibs_op_ctl |= IBS_OP_ENABLE;
-               val = op_amd_randomize_ibs_op(ibs_op_ctl);
+               val = op_amd_randomize_ibs_op(ibs_state.ibs_op_ctl);
                wrmsrl(MSR_AMD64_IBSOPCTL, val);
        }
 }
@@ -281,29 +322,25 @@ static inline int eilvt_is_available(int offset)
 
 static inline int ibs_eilvt_valid(void)
 {
-       u64 val;
        int offset;
+       u64 val;
 
        rdmsrl(MSR_AMD64_IBSCTL, val);
+       offset = val & IBSCTL_LVT_OFFSET_MASK;
+
        if (!(val & IBSCTL_LVT_OFFSET_VALID)) {
-               pr_err(FW_BUG "cpu %d, invalid IBS "
-                      "interrupt offset %d (MSR%08X=0x%016llx)",
-                      smp_processor_id(), offset,
-                      MSR_AMD64_IBSCTL, val);
+               pr_err(FW_BUG "cpu %d, invalid IBS interrupt offset %d (MSR%08X=0x%016llx)\n",
+                      smp_processor_id(), offset, MSR_AMD64_IBSCTL, val);
                return 0;
        }
 
-       offset = val & IBSCTL_LVT_OFFSET_MASK;
-
-       if (eilvt_is_available(offset))
-               return !0;
-
-       pr_err(FW_BUG "cpu %d, IBS interrupt offset %d "
-              "not available (MSR%08X=0x%016llx)",
-              smp_processor_id(), offset,
-              MSR_AMD64_IBSCTL, val);
+       if (!eilvt_is_available(offset)) {
+               pr_err(FW_BUG "cpu %d, IBS interrupt offset %d not available (MSR%08X=0x%016llx)\n",
+                      smp_processor_id(), offset, MSR_AMD64_IBSCTL, val);
+               return 0;
+       }
 
-       return 0;
+       return 1;
 }
 
 static inline int get_ibs_offset(void)
@@ -630,28 +667,33 @@ static int setup_ibs_files(struct super_block *sb, struct dentry *root)
        /* model specific files */
 
        /* setup some reasonable defaults */
+       memset(&ibs_config, 0, sizeof(ibs_config));
        ibs_config.max_cnt_fetch = 250000;
-       ibs_config.fetch_enabled = 0;
        ibs_config.max_cnt_op = 250000;
-       ibs_config.op_enabled = 0;
-       ibs_config.dispatched_ops = 0;
-
-       dir = oprofilefs_mkdir(sb, root, "ibs_fetch");
-       oprofilefs_create_ulong(sb, dir, "enable",
-                               &ibs_config.fetch_enabled);
-       oprofilefs_create_ulong(sb, dir, "max_count",
-                               &ibs_config.max_cnt_fetch);
-       oprofilefs_create_ulong(sb, dir, "rand_enable",
-                               &ibs_config.rand_en);
-
-       dir = oprofilefs_mkdir(sb, root, "ibs_op");
-       oprofilefs_create_ulong(sb, dir, "enable",
-                               &ibs_config.op_enabled);
-       oprofilefs_create_ulong(sb, dir, "max_count",
-                               &ibs_config.max_cnt_op);
-       if (ibs_caps & IBS_CAPS_OPCNT)
-               oprofilefs_create_ulong(sb, dir, "dispatched_ops",
-                                       &ibs_config.dispatched_ops);
+
+       if (ibs_caps & IBS_CAPS_FETCHSAM) {
+               dir = oprofilefs_mkdir(sb, root, "ibs_fetch");
+               oprofilefs_create_ulong(sb, dir, "enable",
+                                       &ibs_config.fetch_enabled);
+               oprofilefs_create_ulong(sb, dir, "max_count",
+                                       &ibs_config.max_cnt_fetch);
+               oprofilefs_create_ulong(sb, dir, "rand_enable",
+                                       &ibs_config.rand_en);
+       }
+
+       if (ibs_caps & IBS_CAPS_OPSAM) {
+               dir = oprofilefs_mkdir(sb, root, "ibs_op");
+               oprofilefs_create_ulong(sb, dir, "enable",
+                                       &ibs_config.op_enabled);
+               oprofilefs_create_ulong(sb, dir, "max_count",
+                                       &ibs_config.max_cnt_op);
+               if (ibs_caps & IBS_CAPS_OPCNT)
+                       oprofilefs_create_ulong(sb, dir, "dispatched_ops",
+                                               &ibs_config.dispatched_ops);
+               if (ibs_caps & IBS_CAPS_BRNTRGT)
+                       oprofilefs_create_ulong(sb, dir, "branch_target",
+                                               &ibs_config.branch_target);
+       }
 
        return 0;
 }
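
The IBS_CAPS_OPCNTEXT path above packs a wider max-count value into IBSOPCTL by moving the bits above the classic 16-bit field up by four positions; the arithmetic, pulled out as a stand-alone sketch:

    /* Stand-alone restatement of the encoding used in op_amd_start_ibs():
     * bits [15:0] of the count stay in place, the bits above them land in
     * the extended field four positions higher. */
    static unsigned long long example_encode_op_max_cnt(unsigned long long cnt)
    {
            return ((cnt & ~0x0000FFFFULL) << 4) | (cnt & 0x0000FFFFULL);
    }
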
index 44ab12d..70ddeae 100644 (file)
@@ -59,7 +59,6 @@
 #include <asm/pgtable.h>
 #include <asm/tlbflush.h>
 #include <asm/reboot.h>
-#include <asm/setup.h>
 #include <asm/stackprotector.h>
 #include <asm/hypervisor.h>
 
@@ -1016,7 +1015,7 @@ static void xen_reboot(int reason)
        struct sched_shutdown r = { .reason = reason };
 
 #ifdef CONFIG_SMP
-       smp_send_stop();
+       stop_other_cpus();
 #endif
 
        if (HYPERVISOR_sched_op(SCHEDOP_shutdown, &r))
index 25f232b..f4d0100 100644 (file)
@@ -400,9 +400,9 @@ static void stop_self(void *v)
        BUG();
 }
 
-static void xen_smp_send_stop(void)
+static void xen_stop_other_cpus(int wait)
 {
-       smp_call_function(stop_self, NULL, 0);
+       smp_call_function(stop_self, NULL, wait);
 }
 
 static void xen_smp_send_reschedule(int cpu)
@@ -470,7 +470,7 @@ static const struct smp_ops xen_smp_ops __initdata = {
        .cpu_disable = xen_cpu_disable,
        .play_dead = xen_play_dead,
 
-       .smp_send_stop = xen_smp_send_stop,
+       .stop_other_cpus = xen_stop_other_cpus,
        .smp_send_reschedule = xen_smp_send_reschedule,
 
        .send_call_func_ipi = xen_smp_send_call_function_ipi,
index 9d4e1ce..c72c947 100644 (file)
@@ -256,9 +256,11 @@ int ptrace_pokeusr(struct task_struct *child, long regno, long val)
        return 0;
 }
 
-long arch_ptrace(struct task_struct *child, long request, long addr, long data)
+long arch_ptrace(struct task_struct *child, long request,
+                unsigned long addr, unsigned long data)
 {
        int ret = -EPERM;
+       void __user *datap = (void __user *) data;
 
        switch (request) {
        case PTRACE_PEEKTEXT:   /* read word at location addr. */
@@ -267,7 +269,7 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data)
                break;
 
        case PTRACE_PEEKUSR:    /* read register specified by addr. */
-               ret = ptrace_peekusr(child, addr, (void __user *) data);
+               ret = ptrace_peekusr(child, addr, datap);
                break;
 
        case PTRACE_POKETEXT:   /* write the word at location addr. */
@@ -280,19 +282,19 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data)
                break;
 
        case PTRACE_GETREGS:
-               ret = ptrace_getregs(child, (void __user *) data);
+               ret = ptrace_getregs(child, datap);
                break;
 
        case PTRACE_SETREGS:
-               ret = ptrace_setregs(child, (void __user *) data);
+               ret = ptrace_setregs(child, datap);
                break;
 
        case PTRACE_GETXTREGS:
-               ret = ptrace_getxregs(child, (void __user *) data);
+               ret = ptrace_getxregs(child, datap);
                break;
 
        case PTRACE_SETXTREGS:
-               ret = ptrace_setxregs(child, (void __user *) data);
+               ret = ptrace_setxregs(child, datap);
                break;
 
        default:
index 80f9f36..97c5898 100644 (file)
@@ -1736,9 +1736,10 @@ static int __devinit eni_do_init(struct atm_dev *dev)
                eprom = (base+EPROM_SIZE-sizeof(struct midway_eprom));
                if (readl(&eprom->magic) != ENI155_MAGIC) {
                        printk("\n");
-                       printk(KERN_ERR KERN_ERR DEV_LABEL "(itf %d): bad "
-                           "magic - expected 0x%x, got 0x%x\n",dev->number,
-                           ENI155_MAGIC,(unsigned) readl(&eprom->magic));
+                       printk(KERN_ERR DEV_LABEL
+                              "(itf %d): bad magic - expected 0x%x, got 0x%x\n",
+                              dev->number, ENI155_MAGIC,
+                              (unsigned)readl(&eprom->magic));
                        error = -EINVAL;
                        goto unmap;
                }
index e7ba774..25373df 100644 (file)
@@ -566,6 +566,7 @@ static ssize_t ac_read (struct file *filp, char __user *buf, size_t count, loff_
                                struct mailbox mailbox;
 
                                /* Got a packet for us */
+                               memset(&st_loc, 0, sizeof(st_loc));
                                ret = do_ac_read(i, buf, &st_loc, &mailbox);
                                spin_unlock_irqrestore(&apbs[i].mutex, flags);
                                set_current_state(TASK_RUNNING);
index 3afd62e..e9cba13 100644 (file)
@@ -713,7 +713,6 @@ static int khvcd(void *unused)
        struct hvc_struct *hp;
 
        set_freezable();
-       __set_current_state(TASK_RUNNING);
        do {
                poll_mask = 0;
                hvc_kicked = 0;
index bc397d9..7b78e0d 100644 (file)
@@ -4,5 +4,5 @@
 
 obj-$(CONFIG_COMPUTONE)         += ip2.o
 
-ip2-objs                       := ip2main.o
+ip2-y                  := ip2main.o
 
index eb8a1a8..16a9364 100644 (file)
@@ -2,7 +2,7 @@
 # Makefile for the ipmi drivers.
 #
 
-ipmi_si-objs := ipmi_si_intf.o ipmi_kcs_sm.o ipmi_smic_sm.o ipmi_bt_sm.o
+ipmi_si-y := ipmi_si_intf.o ipmi_kcs_sm.o ipmi_smic_sm.o ipmi_bt_sm.o
 
 obj-$(CONFIG_IPMI_HANDLER) += ipmi_msghandler.o
 obj-$(CONFIG_IPMI_DEVICE_INTERFACE) += ipmi_devintf.o
index 1fc8876..2aa3977 100644 (file)
@@ -916,7 +916,7 @@ static struct ipmi_smi_watcher smi_watcher =
        .smi_gone = ipmi_smi_gone,
 };
 
-static __init int init_ipmi_devintf(void)
+static int __init init_ipmi_devintf(void)
 {
        int rv;
 
@@ -954,7 +954,7 @@ static __init int init_ipmi_devintf(void)
 }
 module_init(init_ipmi_devintf);
 
-static __exit void cleanup_ipmi(void)
+static void __exit cleanup_ipmi(void)
 {
        struct ipmi_reg_list *entry, *entry2;
        mutex_lock(&reg_list_mutex);
index 4f3f8c9..2fe72f8 100644 (file)
@@ -4442,13 +4442,13 @@ static int ipmi_init_msghandler(void)
        return 0;
 }
 
-static __init int ipmi_init_msghandler_mod(void)
+static int __init ipmi_init_msghandler_mod(void)
 {
        ipmi_init_msghandler();
        return 0;
 }
 
-static __exit void cleanup_ipmi(void)
+static void __exit cleanup_ipmi(void)
 {
        int count;
 
index b293d57..035da9e 100644 (file)
@@ -1846,7 +1846,7 @@ static int hotmod_handler(const char *val, struct kernel_param *kp)
        return rv;
 }
 
-static __devinit void hardcode_find_bmc(void)
+static void __devinit hardcode_find_bmc(void)
 {
        int             i;
        struct smi_info *info;
@@ -2029,7 +2029,7 @@ struct SPMITable {
        s8      spmi_id[1]; /* A '\0' terminated array starts here. */
 };
 
-static __devinit int try_init_spmi(struct SPMITable *spmi)
+static int __devinit try_init_spmi(struct SPMITable *spmi)
 {
        struct smi_info  *info;
 
@@ -2112,7 +2112,7 @@ static __devinit int try_init_spmi(struct SPMITable *spmi)
        return 0;
 }
 
-static __devinit void spmi_find_bmc(void)
+static void __devinit spmi_find_bmc(void)
 {
        acpi_status      status;
        struct SPMITable *spmi;
@@ -2325,7 +2325,7 @@ static int __devinit decode_dmi(const struct dmi_header *dm,
        return 0;
 }
 
-static __devinit void try_init_dmi(struct dmi_ipmi_data *ipmi_data)
+static void __devinit try_init_dmi(struct dmi_ipmi_data *ipmi_data)
 {
        struct smi_info *info;
 
@@ -3012,7 +3012,7 @@ static __devinitdata struct ipmi_default_vals
        { .port = 0 }
 };
 
-static __devinit void default_find_bmc(void)
+static void __devinit default_find_bmc(void)
 {
        struct smi_info *info;
        int             i;
@@ -3312,7 +3312,7 @@ static int try_smi_init(struct smi_info *new_smi)
        return rv;
 }
 
-static __devinit int init_ipmi_si(void)
+static int __devinit init_ipmi_si(void)
 {
        int  i;
        char *str;
@@ -3525,7 +3525,7 @@ static void cleanup_one_si(struct smi_info *to_clean)
        kfree(to_clean);
 }
 
-static __exit void cleanup_ipmi_si(void)
+static void __exit cleanup_ipmi_si(void)
 {
        struct smi_info *e, *tmp_e;
 
index c070b53..e6d7562 100644 (file)
@@ -176,9 +176,9 @@ static void mmtimer_setup_int_2(int cpu, u64 expires)
  * in order to insure that the setup succeeds in a deterministic time frame.
  * It will check if the interrupt setup succeeded.
  */
-static int mmtimer_setup(int cpu, int comparator, unsigned long expires)
+static int mmtimer_setup(int cpu, int comparator, unsigned long expires,
+       u64 *set_completion_time)
 {
-
        switch (comparator) {
        case 0:
                mmtimer_setup_int_0(cpu, expires);
@@ -191,7 +191,8 @@ static int mmtimer_setup(int cpu, int comparator, unsigned long expires)
                break;
        }
        /* We might've missed our expiration time */
-       if (rtc_time() <= expires)
+       *set_completion_time = rtc_time();
+       if (*set_completion_time <= expires)
                return 1;
 
        /*
@@ -227,6 +228,8 @@ static int mmtimer_disable_int(long nasid, int comparator)
 #define TIMER_OFF      0xbadcabLL      /* Timer is not setup */
 #define TIMER_SET      0               /* Comparator is set for this timer */
 
+#define MMTIMER_INTERVAL_RETRY_INCREMENT_DEFAULT 40
+
 /* There is one of these for each timer */
 struct mmtimer {
        struct rb_node list;
@@ -242,6 +245,11 @@ struct mmtimer_node {
 };
 static struct mmtimer_node *timers;
 
+static unsigned mmtimer_interval_retry_increment =
+       MMTIMER_INTERVAL_RETRY_INCREMENT_DEFAULT;
+module_param(mmtimer_interval_retry_increment, uint, 0644);
+MODULE_PARM_DESC(mmtimer_interval_retry_increment,
+       "RTC ticks to add to expiration on interval retry (default 40)");
 
 /*
  * Add a new mmtimer struct to the node's mmtimer list.
@@ -289,7 +297,8 @@ static void mmtimer_set_next_timer(int nodeid)
        struct mmtimer_node *n = &timers[nodeid];
        struct mmtimer *x;
        struct k_itimer *t;
-       int o;
+       u64 expires, exp, set_completion_time;
+       int i;
 
 restart:
        if (n->next == NULL)
@@ -300,7 +309,8 @@ restart:
        if (!t->it.mmtimer.incr) {
                /* Not an interval timer */
                if (!mmtimer_setup(x->cpu, COMPARATOR,
-                                       t->it.mmtimer.expires)) {
+                                       t->it.mmtimer.expires,
+                                       &set_completion_time)) {
                        /* Late setup, fire now */
                        tasklet_schedule(&n->tasklet);
                }
@@ -308,14 +318,23 @@ restart:
        }
 
        /* Interval timer */
-       o = 0;
-       while (!mmtimer_setup(x->cpu, COMPARATOR, t->it.mmtimer.expires)) {
-               unsigned long e, e1;
-               struct rb_node *next;
-               t->it.mmtimer.expires += t->it.mmtimer.incr << o;
-               t->it_overrun += 1 << o;
-               o++;
-               if (o > 20) {
+       i = 0;
+       expires = exp = t->it.mmtimer.expires;
+       while (!mmtimer_setup(x->cpu, COMPARATOR, expires,
+                               &set_completion_time)) {
+               int to;
+
+               i++;
+               expires = set_completion_time +
+                               mmtimer_interval_retry_increment + (1 << i);
+               /* Calculate overruns as we go. */
+               to = ((u64)(expires - exp) / t->it.mmtimer.incr);
+               if (to) {
+                       t->it_overrun += to;
+                       t->it.mmtimer.expires += t->it.mmtimer.incr * to;
+                       exp = t->it.mmtimer.expires;
+               }
+               if (i > 20) {
                        printk(KERN_ALERT "mmtimer: cannot reschedule timer\n");
                        t->it.mmtimer.clock = TIMER_OFF;
                        n->next = rb_next(&x->list);
@@ -323,21 +342,6 @@ restart:
                        kfree(x);
                        goto restart;
                }
-
-               e = t->it.mmtimer.expires;
-               next = rb_next(&x->list);
-
-               if (next == NULL)
-                       continue;
-
-               e1 = rb_entry(next, struct mmtimer, list)->
-                       timer->it.mmtimer.expires;
-               if (e > e1) {
-                       n->next = next;
-                       rb_erase(&x->list, &n->timer_head);
-                       mmtimer_add_list(x);
-                       goto restart;
-               }
        }
 }
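
The reworked interval-retry loop above bases the next expiration on when the previous comparator setup actually completed and folds the skipped intervals into it_overrun as it goes; a hedged sketch of that arithmetic with hypothetical names:

    /* Sketch with hypothetical names: returns the next expiration to try
     * and reports how many whole intervals were skipped along the way. */
    static u64 example_next_expiration(u64 set_completion_time, u64 exp,
                                       u64 incr, unsigned retry_increment,
                                       int attempt, u64 *overruns)
    {
            u64 expires = set_completion_time + retry_increment + (1ULL << attempt);

            *overruns = (expires - exp) / incr;     /* whole intervals skipped */
            return expires;
    }
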
 
index 754c9e2..26b4fce 100644 (file)
@@ -6,10 +6,10 @@
 
 obj-$(CONFIG_MWAVE) += mwave.o
 
-mwave-objs := mwavedd.o smapi.o tp3780i.o 3780i.o
+mwave-y := mwavedd.o smapi.o tp3780i.o 3780i.o
 
 # To have the mwave driver disable other uarts if necessary
 # EXTRA_CFLAGS += -DMWAVE_FUTZ_WITH_OTHER_DEVICES
 
 # To compile in lots (~20 KiB) of run-time enablable printk()s for debugging:
-EXTRA_CFLAGS += -DMW_TRACE
+ccflags-y := -DMW_TRACE
index 463df27..dd9d753 100644 (file)
@@ -303,6 +303,7 @@ static void mxser_enable_must_enchance_mode(unsigned long baseio)
        outb(oldlcr, baseio + UART_LCR);
 }
 
+#ifdef CONFIG_PCI
 static void mxser_disable_must_enchance_mode(unsigned long baseio)
 {
        u8 oldlcr;
@@ -317,6 +318,7 @@ static void mxser_disable_must_enchance_mode(unsigned long baseio)
        outb(efr, baseio + MOXA_MUST_EFR_REGISTER);
        outb(oldlcr, baseio + UART_LCR);
 }
+#endif
 
 static void mxser_set_must_xon1_value(unsigned long baseio, u8 value)
 {
@@ -388,6 +390,7 @@ static void mxser_set_must_enum_value(unsigned long baseio, u8 value)
        outb(oldlcr, baseio + UART_LCR);
 }
 
+#ifdef CONFIG_PCI
 static void mxser_get_must_hardware_id(unsigned long baseio, u8 *pId)
 {
        u8 oldlcr;
@@ -404,6 +407,7 @@ static void mxser_get_must_hardware_id(unsigned long baseio, u8 *pId)
        *pId = inb(baseio + MOXA_MUST_HWID_REGISTER);
        outb(oldlcr, baseio + UART_LCR);
 }
+#endif
 
 static void SET_MOXA_MUST_NO_SOFTWARE_FLOW_CONTROL(unsigned long baseio)
 {
index b71eb59..db80873 100644 (file)
@@ -6,5 +6,5 @@
 
 obj-$(CONFIG_IPWIRELESS) += ipwireless.o
 
-ipwireless-objs := hardware.o main.o network.o tty.o
+ipwireless-y := hardware.o main.o network.o tty.o
 
index 723152d..f176dba 100644 (file)
@@ -613,6 +613,7 @@ static int pp_do_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
 
        case PPGETTIME:
                to_jiffies = pp->pdev->timeout;
+               memset(&par_timeout, 0, sizeof(par_timeout));
                par_timeout.tv_sec = to_jiffies / HZ;
                par_timeout.tv_usec = (to_jiffies % (long)HZ) * (1000000/HZ);
                if (copy_to_user (argp, &par_timeout, sizeof(struct timeval)))
index 74f00b5..73dcb0e 100644 (file)
@@ -25,6 +25,8 @@
 #include <linux/time.h>
 #include <linux/io.h>
 #include <linux/ioport.h>
+#include <linux/platform_device.h>
+#include <linux/ramoops.h>
 
 #define RAMOOPS_KERNMSG_HDR "===="
 #define RAMOOPS_HEADER_SIZE   (5 + sizeof(struct timeval))
@@ -91,11 +93,17 @@ static void ramoops_do_dump(struct kmsg_dumper *dumper,
        cxt->count = (cxt->count + 1) % cxt->max_count;
 }
 
-static int __init ramoops_init(void)
+static int __init ramoops_probe(struct platform_device *pdev)
 {
+       struct ramoops_platform_data *pdata = pdev->dev.platform_data;
        struct ramoops_context *cxt = &oops_cxt;
        int err = -EINVAL;
 
+       if (pdata) {
+               mem_size = pdata->mem_size;
+               mem_address = pdata->mem_address;
+       }
+
        if (!mem_size) {
                printk(KERN_ERR "ramoops: invalid size specification");
                goto fail3;
@@ -142,7 +150,7 @@ fail3:
        return err;
 }
 
-static void __exit ramoops_exit(void)
+static int __exit ramoops_remove(struct platform_device *pdev)
 {
        struct ramoops_context *cxt = &oops_cxt;
 
@@ -151,8 +159,26 @@ static void __exit ramoops_exit(void)
 
        iounmap(cxt->virt_addr);
        release_mem_region(cxt->phys_addr, cxt->size);
+       return 0;
 }
 
+static struct platform_driver ramoops_driver = {
+       .remove         = __exit_p(ramoops_remove),
+       .driver         = {
+               .name   = "ramoops",
+               .owner  = THIS_MODULE,
+       },
+};
+
+static int __init ramoops_init(void)
+{
+       return platform_driver_probe(&ramoops_driver, ramoops_probe);
+}
+
+static void __exit ramoops_exit(void)
+{
+       platform_driver_unregister(&ramoops_driver);
+}
 
 module_init(ramoops_init);
 module_exit(ramoops_exit);
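
With ramoops converted to a platform driver, the memory window can come from platform data rather than module parameters; a hedged sketch of how a board file might register it (the device name matches the driver above, the address and size are illustrative):

    /* Illustrative board-file fragment; the address and size are made up. */
    static struct ramoops_platform_data example_ramoops_data = {
            .mem_size    = 1024 * 1024,     /* 1 MiB carve-out (example) */
            .mem_address = 0x8f000000,      /* board-specific (example)  */
    };

    static struct platform_device example_ramoops_dev = {
            .name = "ramoops",
            .dev  = { .platform_data = &example_ramoops_data },
    };

    /* registered from board init with platform_device_register(&example_ramoops_dev) */
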
index 2d1c5a7..1661875 100644 (file)
@@ -8,5 +8,5 @@
 
 obj-$(CONFIG_RIO) += rio.o
 
-rio-objs := rio_linux.o rioinit.o rioboot.o riocmd.o rioctrl.o riointr.o \
+rio-y := rio_linux.o rioinit.o rioboot.o riocmd.o rioctrl.o riointr.o \
             rioparam.o rioroute.o riotable.o riotty.o
index 7c79d24..8630883 100644 (file)
@@ -2345,7 +2345,7 @@ static int __init rp_init(void)
        ret = tty_register_driver(rocket_driver);
        if (ret < 0) {
                printk(KERN_ERR "Couldn't install tty RocketPort driver\n");
-               goto err_tty;
+               goto err_controller;
        }
 
 #ifdef ROCKET_DEBUG_OPEN
@@ -2380,6 +2380,9 @@ static int __init rp_init(void)
        return 0;
 err_ttyu:
        tty_unregister_driver(rocket_driver);
+err_controller:
+       if (controller)
+               release_region(controller, 4);
 err_tty:
        put_tty_driver(rocket_driver);
 err:
index 1746d91..d01fffe 100644 (file)
@@ -301,6 +301,8 @@ struct slgt_info {
        unsigned int rx_pio;
        unsigned int if_mode;
        unsigned int base_clock;
+       unsigned int xsync;
+       unsigned int xctrl;
 
        /* device status */
 
@@ -405,6 +407,8 @@ static MGSL_PARAMS default_params = {
 #define TDCSR 0x94 /* tx DMA control/status */
 #define RDDAR 0x98 /* rx DMA descriptor address */
 #define TDDAR 0x9c /* tx DMA descriptor address */
+#define XSR   0x40 /* extended sync pattern */
+#define XCR   0x44 /* extended control */
 
 #define RXIDLE      BIT14
 #define RXBREAK     BIT14
@@ -517,6 +521,10 @@ static int  set_interface(struct slgt_info *info, int if_mode);
 static int  set_gpio(struct slgt_info *info, struct gpio_desc __user *gpio);
 static int  get_gpio(struct slgt_info *info, struct gpio_desc __user *gpio);
 static int  wait_gpio(struct slgt_info *info, struct gpio_desc __user *gpio);
+static int  get_xsync(struct slgt_info *info, int __user *if_mode);
+static int  set_xsync(struct slgt_info *info, int if_mode);
+static int  get_xctrl(struct slgt_info *info, int __user *if_mode);
+static int  set_xctrl(struct slgt_info *info, int if_mode);
 
 /*
  * driver functions
@@ -1056,6 +1064,14 @@ static int ioctl(struct tty_struct *tty, struct file *file,
                return get_gpio(info, argp);
        case MGSL_IOCWAITGPIO:
                return wait_gpio(info, argp);
+       case MGSL_IOCGXSYNC:
+               return get_xsync(info, argp);
+       case MGSL_IOCSXSYNC:
+               return set_xsync(info, (int)arg);
+       case MGSL_IOCGXCTRL:
+               return get_xctrl(info, argp);
+       case MGSL_IOCSXCTRL:
+               return set_xctrl(info, (int)arg);
        }
        mutex_lock(&info->port.mutex);
        switch (cmd) {
@@ -1132,6 +1148,7 @@ static long get_params32(struct slgt_info *info, struct MGSL_PARAMS32 __user *us
        struct MGSL_PARAMS32 tmp_params;
 
        DBGINFO(("%s get_params32\n", info->device_name));
+       memset(&tmp_params, 0, sizeof(tmp_params));
        tmp_params.mode            = (compat_ulong_t)info->params.mode;
        tmp_params.loopback        = info->params.loopback;
        tmp_params.flags           = info->params.flags;
@@ -1212,12 +1229,16 @@ static long slgt_compat_ioctl(struct tty_struct *tty, struct file *file,
        case MGSL_IOCSGPIO:
        case MGSL_IOCGGPIO:
        case MGSL_IOCWAITGPIO:
+       case MGSL_IOCGXSYNC:
+       case MGSL_IOCGXCTRL:
        case MGSL_IOCSTXIDLE:
        case MGSL_IOCTXENABLE:
        case MGSL_IOCRXENABLE:
        case MGSL_IOCTXABORT:
        case TIOCMIWAIT:
        case MGSL_IOCSIF:
+       case MGSL_IOCSXSYNC:
+       case MGSL_IOCSXCTRL:
                rc = ioctl(tty, file, cmd, arg);
                break;
        }
@@ -1617,6 +1638,8 @@ static int hdlcdev_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
        if (cmd != SIOCWANDEV)
                return hdlc_ioctl(dev, ifr, cmd);
 
+       memset(&new_line, 0, sizeof(new_line));
+
        switch(ifr->ifr_settings.type) {
        case IF_GET_IFACE: /* return current sync_serial_settings */
 
@@ -1958,6 +1981,7 @@ static void bh_handler(struct work_struct *work)
                        case MGSL_MODE_RAW:
                        case MGSL_MODE_MONOSYNC:
                        case MGSL_MODE_BISYNC:
+                       case MGSL_MODE_XSYNC:
                                while(rx_get_buf(info));
                                break;
                        }
@@ -2357,26 +2381,27 @@ static irqreturn_t slgt_interrupt(int dummy, void *dev_id)
 
        DBGISR(("slgt_interrupt irq=%d entry\n", info->irq_level));
 
-       spin_lock(&info->lock);
-
        while((gsr = rd_reg32(info, GSR) & 0xffffff00)) {
                DBGISR(("%s gsr=%08x\n", info->device_name, gsr));
                info->irq_occurred = true;
                for(i=0; i < info->port_count ; i++) {
                        if (info->port_array[i] == NULL)
                                continue;
+                       spin_lock(&info->port_array[i]->lock);
                        if (gsr & (BIT8 << i))
                                isr_serial(info->port_array[i]);
                        if (gsr & (BIT16 << (i*2)))
                                isr_rdma(info->port_array[i]);
                        if (gsr & (BIT17 << (i*2)))
                                isr_tdma(info->port_array[i]);
+                       spin_unlock(&info->port_array[i]->lock);
                }
        }
 
        if (info->gpio_present) {
                unsigned int state;
                unsigned int changed;
+               spin_lock(&info->lock);
                while ((changed = rd_reg32(info, IOSR)) != 0) {
                        DBGISR(("%s iosr=%08x\n", info->device_name, changed));
                        /* read latched state of GPIO signals */
@@ -2388,22 +2413,24 @@ static irqreturn_t slgt_interrupt(int dummy, void *dev_id)
                                        isr_gpio(info->port_array[i], changed, state);
                        }
                }
+               spin_unlock(&info->lock);
        }
 
        for(i=0; i < info->port_count ; i++) {
                struct slgt_info *port = info->port_array[i];
-
-               if (port && (port->port.count || port->netcount) &&
+               if (port == NULL)
+                       continue;
+               spin_lock(&port->lock);
+               if ((port->port.count || port->netcount) &&
                    port->pending_bh && !port->bh_running &&
                    !port->bh_requested) {
                        DBGISR(("%s bh queued\n", port->device_name));
                        schedule_work(&port->task);
                        port->bh_requested = true;
                }
+               spin_unlock(&port->lock);
        }
 
-       spin_unlock(&info->lock);
-
        DBGISR(("slgt_interrupt irq=%d exit\n", info->irq_level));
        return IRQ_HANDLED;
 }
@@ -2883,6 +2910,69 @@ static int set_interface(struct slgt_info *info, int if_mode)
        return 0;
 }
 
+static int get_xsync(struct slgt_info *info, int __user *xsync)
+{
+       DBGINFO(("%s get_xsync=%x\n", info->device_name, info->xsync));
+       if (put_user(info->xsync, xsync))
+               return -EFAULT;
+       return 0;
+}
+
+/*
+ * set extended sync pattern (1 to 4 bytes) for extended sync mode
+ *
+ * sync pattern is contained in least significant bytes of value
+ * most significant byte of sync pattern is oldest (1st sent/detected)
+ */
+static int set_xsync(struct slgt_info *info, int xsync)
+{
+       unsigned long flags;
+
+       DBGINFO(("%s set_xsync=%x)\n", info->device_name, xsync));
+       spin_lock_irqsave(&info->lock, flags);
+       info->xsync = xsync;
+       wr_reg32(info, XSR, xsync);
+       spin_unlock_irqrestore(&info->lock, flags);
+       return 0;
+}
+
+static int get_xctrl(struct slgt_info *info, int __user *xctrl)
+{
+       DBGINFO(("%s get_xctrl=%x\n", info->device_name, info->xctrl));
+       if (put_user(info->xctrl, xctrl))
+               return -EFAULT;
+       return 0;
+}
+
+/*
+ * set extended control options
+ *
+ * xctrl[31:19] reserved, must be zero
+ * xctrl[18:17] extended sync pattern length in bytes
+ *              00 = 1 byte  in xsr[7:0]
+ *              01 = 2 bytes in xsr[15:0]
+ *              10 = 3 bytes in xsr[23:0]
+ *              11 = 4 bytes in xsr[31:0]
+ * xctrl[16]    1 = enable terminal count, 0=disabled
+ * xctrl[15:0]  receive terminal count for fixed length packets
+ *              value is count minus one (0 = 1 byte packet)
+ *              when terminal count is reached, receiver
+ *              automatically returns to hunt mode and receive
+ *              FIFO contents are flushed to DMA buffers with
+ *              end of frame (EOF) status
+ */
+static int set_xctrl(struct slgt_info *info, int xctrl)
+{
+       unsigned long flags;
+
+       DBGINFO(("%s set_xctrl=%x)\n", info->device_name, xctrl));
+       spin_lock_irqsave(&info->lock, flags);
+       info->xctrl = xctrl;
+       wr_reg32(info, XCR, xctrl);
+       spin_unlock_irqrestore(&info->lock, flags);
+       return 0;
+}
+
 /*
  * set general purpose IO pin state and direction
  *
@@ -2906,7 +2996,7 @@ static int set_gpio(struct slgt_info *info, struct gpio_desc __user *user_gpio)
                 info->device_name, gpio.state, gpio.smask,
                 gpio.dir, gpio.dmask));
 
-       spin_lock_irqsave(&info->lock,flags);
+       spin_lock_irqsave(&info->port_array[0]->lock, flags);
        if (gpio.dmask) {
                data = rd_reg32(info, IODR);
                data |= gpio.dmask & gpio.dir;
@@ -2919,7 +3009,7 @@ static int set_gpio(struct slgt_info *info, struct gpio_desc __user *user_gpio)
                data &= ~(gpio.smask & ~gpio.state);
                wr_reg32(info, IOVR, data);
        }
-       spin_unlock_irqrestore(&info->lock,flags);
+       spin_unlock_irqrestore(&info->port_array[0]->lock, flags);
 
        return 0;
 }
@@ -3020,7 +3110,7 @@ static int wait_gpio(struct slgt_info *info, struct gpio_desc __user *user_gpio)
                return -EINVAL;
        init_cond_wait(&wait, gpio.smask);
 
-       spin_lock_irqsave(&info->lock, flags);
+       spin_lock_irqsave(&info->port_array[0]->lock, flags);
        /* enable interrupts for watched pins */
        wr_reg32(info, IOER, rd_reg32(info, IOER) | gpio.smask);
        /* get current pin states */
@@ -3032,20 +3122,20 @@ static int wait_gpio(struct slgt_info *info, struct gpio_desc __user *user_gpio)
        } else {
                /* wait for target state */
                add_cond_wait(&info->gpio_wait_q, &wait);
-               spin_unlock_irqrestore(&info->lock, flags);
+               spin_unlock_irqrestore(&info->port_array[0]->lock, flags);
                schedule();
                if (signal_pending(current))
                        rc = -ERESTARTSYS;
                else
                        gpio.state = wait.data;
-               spin_lock_irqsave(&info->lock, flags);
+               spin_lock_irqsave(&info->port_array[0]->lock, flags);
                remove_cond_wait(&info->gpio_wait_q, &wait);
        }
 
        /* disable all GPIO interrupts if no waiting processes */
        if (info->gpio_wait_q == NULL)
                wr_reg32(info, IOER, 0);
-       spin_unlock_irqrestore(&info->lock,flags);
+       spin_unlock_irqrestore(&info->port_array[0]->lock, flags);
 
        if ((rc == 0) && copy_to_user(user_gpio, &gpio, sizeof(gpio)))
                rc = -EFAULT;
@@ -3578,7 +3668,6 @@ static void device_init(int adapter_num, struct pci_dev *pdev)
 
                /* copy resource information from first port to others */
                for (i = 1; i < port_count; ++i) {
-                       port_array[i]->lock      = port_array[0]->lock;
                        port_array[i]->irq_level = port_array[0]->irq_level;
                        port_array[i]->reg_addr  = port_array[0]->reg_addr;
                        alloc_dma_bufs(port_array[i]);
@@ -3763,7 +3852,9 @@ module_exit(slgt_exit);
 #define CALC_REGADDR() \
        unsigned long reg_addr = ((unsigned long)info->reg_addr) + addr; \
        if (addr >= 0x80) \
-               reg_addr += (info->port_num) * 32;
+               reg_addr += (info->port_num) * 32; \
+       else if (addr >= 0x40)  \
+               reg_addr += (info->port_num) * 16;
 
 static __u8 rd_reg8(struct slgt_info *info, unsigned int addr)
 {
@@ -4182,7 +4273,13 @@ static void sync_mode(struct slgt_info *info)
 
        /* TCR (tx control)
         *
-        * 15..13  mode, 000=HDLC 001=raw 010=async 011=monosync 100=bisync
+        * 15..13  mode
+        *         000=HDLC/SDLC
+        *         001=raw bit synchronous
+        *         010=asynchronous/isochronous
+        *         011=monosync byte synchronous
+        *         100=bisync byte synchronous
+        *         101=xsync byte synchronous
         * 12..10  encoding
         * 09      CRC enable
         * 08      CRC32
@@ -4197,6 +4294,9 @@ static void sync_mode(struct slgt_info *info)
        val = BIT2;
 
        switch(info->params.mode) {
+       case MGSL_MODE_XSYNC:
+               val |= BIT15 + BIT13;
+               break;
        case MGSL_MODE_MONOSYNC: val |= BIT14 + BIT13; break;
        case MGSL_MODE_BISYNC:   val |= BIT15; break;
        case MGSL_MODE_RAW:      val |= BIT13; break;
@@ -4251,7 +4351,13 @@ static void sync_mode(struct slgt_info *info)
 
        /* RCR (rx control)
         *
-        * 15..13  mode, 000=HDLC 001=raw 010=async 011=monosync 100=bisync
+        * 15..13  mode
+        *         000=HDLC/SDLC
+        *         001=raw bit synchronous
+        *         010=asynchronous/isochronous
+        *         011=monosync byte synchronous
+        *         100=bisync byte synchronous
+        *         101=xsync byte synchronous
         * 12..10  encoding
         * 09      CRC enable
         * 08      CRC32
@@ -4263,6 +4369,9 @@ static void sync_mode(struct slgt_info *info)
        val = 0;
 
        switch(info->params.mode) {
+       case MGSL_MODE_XSYNC:
+               val |= BIT15 + BIT13;
+               break;
        case MGSL_MODE_MONOSYNC: val |= BIT14 + BIT13; break;
        case MGSL_MODE_BISYNC:   val |= BIT15; break;
        case MGSL_MODE_RAW:      val |= BIT13; break;
@@ -4679,6 +4788,7 @@ static bool rx_get_buf(struct slgt_info *info)
        switch(info->params.mode) {
        case MGSL_MODE_MONOSYNC:
        case MGSL_MODE_BISYNC:
+       case MGSL_MODE_XSYNC:
                /* ignore residue in byte synchronous modes */
                if (desc_residue(info->rbufs[i]))
                        count--;
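The synclink_gt changes above add an extended byte-synchronous mode (MGSL_MODE_XSYNC) with a programmable 1-4 byte sync pattern register (XSR) and an extended control register (XCR) for fixed-length receive packets. As an illustrative userspace sketch only -- not part of the patch -- the new ioctls could be driven as below; the device node name and all numeric values are assumptions, the new MGSL_IOC[GS]X* numbers are assumed to be exported via <linux/synclink.h>, and the mode itself is still selected through the existing MGSL_IOCSPARAMS path handled by sync_mode() above:

#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/synclink.h>     /* assumed to carry the new MGSL_IOC[GS]XSYNC/XCTRL numbers */

static int configure_xsync(const char *dev)     /* e.g. "/dev/ttySLG0" (assumed node name) */
{
        int fd = open(dev, O_RDWR | O_NONBLOCK);
        if (fd < 0)
                return -1;

        /* two byte sync pattern 0x7e 0x7e; most significant byte is sent/detected first */
        if (ioctl(fd, MGSL_IOCSXSYNC, 0x7e7e) < 0) {
                close(fd);
                return -1;
        }

        /* xctrl: bits 18:17 = 01 (2 byte pattern), bit 16 = terminal count enable,
         * bits 15:0 = 127 (fixed 128 byte receive packets, value is count minus one) */
        if (ioctl(fd, MGSL_IOCSXCTRL, (1 << 17) | (1 << 16) | 127) < 0) {
                close(fd);
                return -1;
        }

        return fd;
}

Internally the per-port copies of the new XSR/XCR registers fall in the 0x40-0x7f range covered by the extra CALC_REGADDR branch, so port N's accesses land at base + 0x40/0x44 + N * 16 (port 1's XSR, for example, at base + 0x50).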
index 38df8c1..6b68a0f 100644
@@ -503,6 +503,7 @@ int vt_ioctl(struct tty_struct *tty, struct file * file,
        struct kbd_struct * kbd;
        unsigned int console;
        unsigned char ucval;
+       unsigned int uival;
        void __user *up = (void __user *)arg;
        int i, perm;
        int ret = 0;
@@ -657,7 +658,7 @@ int vt_ioctl(struct tty_struct *tty, struct file * file,
                break;
 
        case KDGETMODE:
-               ucval = vc->vc_mode;
+               uival = vc->vc_mode;
                goto setint;
 
        case KDMAPDISP:
@@ -695,7 +696,7 @@ int vt_ioctl(struct tty_struct *tty, struct file * file,
                break;
 
        case KDGKBMODE:
-               ucval = ((kbd->kbdmode == VC_RAW) ? K_RAW :
+               uival = ((kbd->kbdmode == VC_RAW) ? K_RAW :
                                 (kbd->kbdmode == VC_MEDIUMRAW) ? K_MEDIUMRAW :
                                 (kbd->kbdmode == VC_UNICODE) ? K_UNICODE :
                                 K_XLATE);
@@ -717,9 +718,9 @@ int vt_ioctl(struct tty_struct *tty, struct file * file,
                break;
 
        case KDGKBMETA:
-               ucval = (vc_kbd_mode(kbd, VC_META) ? K_ESCPREFIX : K_METABIT);
+               uival = (vc_kbd_mode(kbd, VC_META) ? K_ESCPREFIX : K_METABIT);
        setint:
-               ret = put_user(ucval, (int __user *)arg);
+               ret = put_user(uival, (int __user *)arg);
                break;
 
        case KDGETKEYCODE:
@@ -949,7 +950,7 @@ int vt_ioctl(struct tty_struct *tty, struct file * file,
                for (i = 0; i < MAX_NR_CONSOLES; ++i)
                        if (! VT_IS_IN_USE(i))
                                break;
-               ucval = i < MAX_NR_CONSOLES ? (i+1) : -1;
+               uival = i < MAX_NR_CONSOLES ? (i+1) : -1;
                goto setint;             
 
        /*
index 210338e..81270d2 100644
 #include <linux/connector.h>
 #include <linux/delay.h>
 
-
-/*
- * This job is sent to the kevent workqueue.
- * While no event is once sent to any callback, the connector workqueue
- * is not created to avoid a useless waiting kernel task.
- * Once the first event is received, we create this dedicated workqueue which
- * is necessary because the flow of data can be high and we don't want
- * to encumber keventd with that.
- */
-static void cn_queue_create(struct work_struct *work)
-{
-       struct cn_queue_dev *dev;
-
-       dev = container_of(work, struct cn_queue_dev, wq_creation);
-
-       dev->cn_queue = create_singlethread_workqueue(dev->name);
-       /* If we fail, we will use keventd for all following connector jobs */
-       WARN_ON(!dev->cn_queue);
-}
-
-/*
- * Queue a data sent to a callback.
- * If the connector workqueue is already created, we queue the job on it.
- * Otherwise, we queue the job to kevent and queue the connector workqueue
- * creation too.
- */
-int queue_cn_work(struct cn_callback_entry *cbq, struct work_struct *work)
-{
-       struct cn_queue_dev *pdev = cbq->pdev;
-
-       if (likely(pdev->cn_queue))
-               return queue_work(pdev->cn_queue, work);
-
-       /* Don't create the connector workqueue twice */
-       if (atomic_inc_return(&pdev->wq_requested) == 1)
-               schedule_work(&pdev->wq_creation);
-       else
-               atomic_dec(&pdev->wq_requested);
-
-       return schedule_work(work);
-}
-
 void cn_queue_wrapper(struct work_struct *work)
 {
        struct cn_callback_entry *cbq =
@@ -111,11 +69,7 @@ cn_queue_alloc_callback_entry(char *name, struct cb_id *id,
 
 static void cn_queue_free_callback(struct cn_callback_entry *cbq)
 {
-       /* The first jobs have been sent to kevent, flush them too */
-       flush_scheduled_work();
-       if (cbq->pdev->cn_queue)
-               flush_workqueue(cbq->pdev->cn_queue);
-
+       flush_workqueue(cbq->pdev->cn_queue);
        kfree(cbq);
 }
 
@@ -193,11 +147,14 @@ struct cn_queue_dev *cn_queue_alloc_dev(char *name, struct sock *nls)
        atomic_set(&dev->refcnt, 0);
        INIT_LIST_HEAD(&dev->queue_list);
        spin_lock_init(&dev->queue_lock);
-       init_waitqueue_head(&dev->wq_created);
 
        dev->nls = nls;
 
-       INIT_WORK(&dev->wq_creation, cn_queue_create);
+       dev->cn_queue = alloc_ordered_workqueue(dev->name, 0);
+       if (!dev->cn_queue) {
+               kfree(dev);
+               return NULL;
+       }
 
        return dev;
 }
@@ -205,25 +162,9 @@ struct cn_queue_dev *cn_queue_alloc_dev(char *name, struct sock *nls)
 void cn_queue_free_dev(struct cn_queue_dev *dev)
 {
        struct cn_callback_entry *cbq, *n;
-       long timeout;
-       DEFINE_WAIT(wait);
-
-       /* Flush the first pending jobs queued on kevent */
-       flush_scheduled_work();
-
-       /* If the connector workqueue creation is still pending, wait for it */
-       prepare_to_wait(&dev->wq_created, &wait, TASK_UNINTERRUPTIBLE);
-       if (atomic_read(&dev->wq_requested) && !dev->cn_queue) {
-               timeout = schedule_timeout(HZ * 2);
-               if (!timeout && !dev->cn_queue)
-                       WARN_ON(1);
-       }
-       finish_wait(&dev->wq_created, &wait);
 
-       if (dev->cn_queue) {
-               flush_workqueue(dev->cn_queue);
-               destroy_workqueue(dev->cn_queue);
-       }
+       flush_workqueue(dev->cn_queue);
+       destroy_workqueue(dev->cn_queue);
 
        spin_lock_bh(&dev->queue_lock);
        list_for_each_entry_safe(cbq, n, &dev->queue_list, callback_entry)
index 1d48f40..e16c3fa 100644
@@ -133,7 +133,8 @@ static int cn_call_callback(struct sk_buff *skb)
                                        __cbq->data.skb == NULL)) {
                                __cbq->data.skb = skb;
 
-                               if (queue_cn_work(__cbq, &__cbq->work))
+                               if (queue_work(dev->cbdev->cn_queue,
+                                              &__cbq->work))
                                        err = 0;
                                else
                                        err = -EINVAL;
@@ -148,13 +149,11 @@ static int cn_call_callback(struct sk_buff *skb)
                                        d->callback = __cbq->data.callback;
                                        d->free = __new_cbq;
 
-                                       __new_cbq->pdev = __cbq->pdev;
-
                                        INIT_WORK(&__new_cbq->work,
                                                        &cn_queue_wrapper);
 
-                                       if (queue_cn_work(__new_cbq,
-                                                   &__new_cbq->work))
+                                       if (queue_work(dev->cbdev->cn_queue,
+                                                      &__new_cbq->work))
                                                err = 0;
                                        else {
                                                kfree(__new_cbq);
index 3533948..92b6790 100644
@@ -926,6 +926,7 @@ static void __devexit pch_dma_remove(struct pci_dev *pdev)
 static const struct pci_device_id pch_dma_id_table[] = {
        { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_PCH_DMA_8CH), 8 },
        { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_PCH_DMA_4CH), 4 },
+       { 0, },
 };
 
 static struct pci_driver pch_dma_driver = {
index b3d22d6..e28e416 100644 (file)
@@ -2,6 +2,7 @@
 #include <linux/string.h>
 #include <linux/init.h>
 #include <linux/module.h>
+#include <linux/ctype.h>
 #include <linux/dmi.h>
 #include <linux/efi.h>
 #include <linux/bootmem.h>
@@ -361,6 +362,33 @@ static void __init dmi_decode(const struct dmi_header *dm, void *dummy)
        }
 }
 
+static void __init print_filtered(const char *info)
+{
+       const char *p;
+
+       if (!info)
+               return;
+
+       for (p = info; *p; p++)
+               if (isprint(*p))
+                       printk(KERN_CONT "%c", *p);
+               else
+                       printk(KERN_CONT "\\x%02x", *p & 0xff);
+}
+
+static void __init dmi_dump_ids(void)
+{
+       printk(KERN_DEBUG "DMI: ");
+       print_filtered(dmi_get_system_info(DMI_BOARD_NAME));
+       printk(KERN_CONT "/");
+       print_filtered(dmi_get_system_info(DMI_PRODUCT_NAME));
+       printk(KERN_CONT ", BIOS ");
+       print_filtered(dmi_get_system_info(DMI_BIOS_VERSION));
+       printk(KERN_CONT " ");
+       print_filtered(dmi_get_system_info(DMI_BIOS_DATE));
+       printk(KERN_CONT "\n");
+}
+
 static int __init dmi_present(const char __iomem *p)
 {
        u8 buf[15];
@@ -381,8 +409,10 @@ static int __init dmi_present(const char __iomem *p)
                               buf[14] >> 4, buf[14] & 0xF);
                else
                        printk(KERN_INFO "DMI present.\n");
-               if (dmi_walk_early(dmi_decode) == 0)
+               if (dmi_walk_early(dmi_decode) == 0) {
+                       dmi_dump_ids();
                        return 0;
+               }
        }
        return 1;
 }
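Purely for illustration (the strings below are invented placeholders), the dmi_dump_ids() hunk above emits a single boot-log line of the shape

  DMI: ExampleBoard/ExampleProduct, BIOS 1.0b 04/01/2010

i.e. board name and product name separated by '/', then the BIOS version and date, with any non-printable characters escaped as \xNN by print_filtered().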
diff --git a/drivers/gpio/74x164.c b/drivers/gpio/74x164.c
new file mode 100644
index 0000000..d91ff4c
--- /dev/null
@@ -0,0 +1,182 @@
+/*
+ *  74Hx164 - Generic serial-in/parallel-out 8-bits shift register GPIO driver
+ *
+ *  Copyright (C) 2010 Gabor Juhos <juhosg@openwrt.org>
+ *  Copyright (C) 2010 Miguel Gaio <miguel.gaio@efixo.com>
+ *
+ *  This program is free software; you can redistribute it and/or modify
+ *  it under the terms of the GNU General Public License version 2 as
+ *  published by the Free Software Foundation.
+ */
+
+#include <linux/init.h>
+#include <linux/mutex.h>
+#include <linux/spi/spi.h>
+#include <linux/spi/74x164.h>
+#include <linux/gpio.h>
+#include <linux/slab.h>
+
+#define GEN_74X164_GPIO_COUNT  8
+
+
+struct gen_74x164_chip {
+       struct spi_device       *spi;
+       struct gpio_chip        gpio_chip;
+       struct mutex            lock;
+       u8                      port_config;
+};
+
+static void gen_74x164_set_value(struct gpio_chip *, unsigned, int);
+
+static struct gen_74x164_chip *gpio_to_chip(struct gpio_chip *gc)
+{
+       return container_of(gc, struct gen_74x164_chip, gpio_chip);
+}
+
+static int __gen_74x164_write_config(struct gen_74x164_chip *chip)
+{
+       return spi_write(chip->spi,
+                        &chip->port_config, sizeof(chip->port_config));
+}
+
+static int gen_74x164_direction_output(struct gpio_chip *gc,
+               unsigned offset, int val)
+{
+       gen_74x164_set_value(gc, offset, val);
+       return 0;
+}
+
+static int gen_74x164_get_value(struct gpio_chip *gc, unsigned offset)
+{
+       struct gen_74x164_chip *chip = gpio_to_chip(gc);
+       int ret;
+
+       mutex_lock(&chip->lock);
+       ret = (chip->port_config >> offset) & 0x1;
+       mutex_unlock(&chip->lock);
+
+       return ret;
+}
+
+static void gen_74x164_set_value(struct gpio_chip *gc,
+               unsigned offset, int val)
+{
+       struct gen_74x164_chip *chip = gpio_to_chip(gc);
+
+       mutex_lock(&chip->lock);
+       if (val)
+               chip->port_config |= (1 << offset);
+       else
+               chip->port_config &= ~(1 << offset);
+
+       __gen_74x164_write_config(chip);
+       mutex_unlock(&chip->lock);
+}
+
+static int __devinit gen_74x164_probe(struct spi_device *spi)
+{
+       struct gen_74x164_chip *chip;
+       struct gen_74x164_chip_platform_data *pdata;
+       int ret;
+
+       pdata = spi->dev.platform_data;
+       if (!pdata || !pdata->base) {
+               dev_dbg(&spi->dev, "incorrect or missing platform data\n");
+               return -EINVAL;
+       }
+
+       /*
+        * bits_per_word cannot be configured in platform data
+        */
+       spi->bits_per_word = 8;
+
+       ret = spi_setup(spi);
+       if (ret < 0)
+               return ret;
+
+       chip = kzalloc(sizeof(*chip), GFP_KERNEL);
+       if (!chip)
+               return -ENOMEM;
+
+       mutex_init(&chip->lock);
+
+       dev_set_drvdata(&spi->dev, chip);
+
+       chip->spi = spi;
+
+       chip->gpio_chip.label = GEN_74X164_DRIVER_NAME,
+               chip->gpio_chip.direction_output = gen_74x164_direction_output;
+       chip->gpio_chip.get = gen_74x164_get_value;
+       chip->gpio_chip.set = gen_74x164_set_value;
+       chip->gpio_chip.base = pdata->base;
+       chip->gpio_chip.ngpio = GEN_74X164_GPIO_COUNT;
+       chip->gpio_chip.can_sleep = 1;
+       chip->gpio_chip.dev = &spi->dev;
+       chip->gpio_chip.owner = THIS_MODULE;
+
+       ret = __gen_74x164_write_config(chip);
+       if (ret) {
+               dev_err(&spi->dev, "Failed writing: %d\n", ret);
+               goto exit_destroy;
+       }
+
+       ret = gpiochip_add(&chip->gpio_chip);
+       if (ret)
+               goto exit_destroy;
+
+       return ret;
+
+exit_destroy:
+       dev_set_drvdata(&spi->dev, NULL);
+       mutex_destroy(&chip->lock);
+       kfree(chip);
+       return ret;
+}
+
+static int gen_74x164_remove(struct spi_device *spi)
+{
+       struct gen_74x164_chip *chip;
+       int ret;
+
+       chip = dev_get_drvdata(&spi->dev);
+       if (chip == NULL)
+               return -ENODEV;
+
+       dev_set_drvdata(&spi->dev, NULL);
+
+       ret = gpiochip_remove(&chip->gpio_chip);
+       if (!ret) {
+               mutex_destroy(&chip->lock);
+               kfree(chip);
+       } else
+               dev_err(&spi->dev, "Failed to remove the GPIO controller: %d\n",
+                               ret);
+
+       return ret;
+}
+
+static struct spi_driver gen_74x164_driver = {
+       .driver = {
+               .name           = GEN_74X164_DRIVER_NAME,
+               .owner          = THIS_MODULE,
+       },
+       .probe          = gen_74x164_probe,
+       .remove         = __devexit_p(gen_74x164_remove),
+};
+
+static int __init gen_74x164_init(void)
+{
+       return spi_register_driver(&gen_74x164_driver);
+}
+subsys_initcall(gen_74x164_init);
+
+static void __exit gen_74x164_exit(void)
+{
+       spi_unregister_driver(&gen_74x164_driver);
+}
+module_exit(gen_74x164_exit);
+
+MODULE_AUTHOR("Gabor Juhos <juhosg@openwrt.org>");
+MODULE_AUTHOR("Miguel Gaio <miguel.gaio@efixo.com>");
+MODULE_DESCRIPTION("GPIO expander driver for 74X164 8-bits shift register");
+MODULE_LICENSE("GPL v2");
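As a usage sketch (not part of the patch): a board file would describe the 74x164 as an SPI device and pass the GPIO base through the platform data consumed by gen_74x164_probe() above. GEN_74X164_DRIVER_NAME and struct gen_74x164_chip_platform_data are assumed to come from <linux/spi/74x164.h> as included by the driver; the bus number, chip select, clock rate and GPIO base below are invented example values.

#include <linux/init.h>
#include <linux/spi/spi.h>
#include <linux/spi/74x164.h>

static struct gen_74x164_chip_platform_data board_74x164_pdata = {
        .base = 100,                            /* first GPIO number to hand out (example) */
};

static struct spi_board_info board_spi_devices[] __initdata = {
        {
                .modalias       = GEN_74X164_DRIVER_NAME,
                .max_speed_hz   = 1000000,      /* example rate */
                .bus_num        = 0,            /* example SPI bus */
                .chip_select    = 1,            /* example chip select */
                .platform_data  = &board_74x164_pdata,
        },
};

/* registered from board init code, e.g.:
 *     spi_register_board_info(board_spi_devices, ARRAY_SIZE(board_spi_devices));
 */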
index 510aa20..dd9b4ba 100644
@@ -70,6 +70,11 @@ config GPIO_MAX730X
 
 comment "Memory mapped GPIO expanders:"
 
+config GPIO_BASIC_MMIO
+       tristate "Basic memory-mapped GPIO controllers support"
+       help
+         Say yes here to support basic memory-mapped GPIO controllers.
+
 config GPIO_IT8761E
        tristate "IT8761E GPIO support"
        depends on GPIOLIB
@@ -267,6 +272,13 @@ config GPIO_ADP5588
          To compile this driver as a module, choose M here: the module will be
          called adp5588-gpio.
 
+config GPIO_ADP5588_IRQ
+       bool "Interrupt controller support for ADP5588"
+       depends on GPIO_ADP5588=y
+       help
+         Say yes here to enable the adp5588 to be used as an interrupt
+         controller. It requires the driver to be built in the kernel.
+
 comment "PCI GPIO expanders:"
 
 config GPIO_CS5535
@@ -301,6 +313,14 @@ config GPIO_LANGWELL
        help
          Say Y here to support Intel Langwell/Penwell GPIO.
 
+config GPIO_PCH
+       tristate "PCH GPIO of Intel Topcliff"
+       depends on PCI
+       help
+         This driver is for PCH(Platform controller Hub) GPIO of Intel Topcliff
+         which is an IOH(Input/Output Hub) for x86 embedded processor.
+         This driver can access PCH GPIO device.
+
 config GPIO_TIMBERDALE
        bool "Support for timberdale GPIO IP"
        depends on MFD_TIMBERDALE && GPIOLIB && HAS_IOMEM
@@ -339,6 +359,14 @@ config GPIO_MC33880
          SPI driver for Freescale MC33880 high-side/low-side switch.
          This provides GPIO interface supporting inputs and outputs.
 
+config GPIO_74X164
+       tristate "74x164 serial-in/parallel-out 8-bits shift register"
+       depends on SPI_MASTER
+       help
+         Platform driver for 74x164 compatible serial-in/parallel-out
+         8-outputs shift registers. This driver can be used to provide access
+         to more gpio outputs.
+
 comment "AC97 GPIO expanders:"
 
 config GPIO_UCB1400
index fc6019d..da2ecde 100644
@@ -10,6 +10,7 @@ obj-$(CONFIG_GPIOLIB)         += gpiolib.o
 
 obj-$(CONFIG_GPIO_ADP5520)     += adp5520-gpio.o
 obj-$(CONFIG_GPIO_ADP5588)     += adp5588-gpio.o
+obj-$(CONFIG_GPIO_BASIC_MMIO)  += basic_mmio_gpio.o
 obj-$(CONFIG_GPIO_LANGWELL)    += langwell_gpio.o
 obj-$(CONFIG_GPIO_MAX730X)     += max730x.o
 obj-$(CONFIG_GPIO_MAX7300)     += max7300.o
@@ -17,8 +18,10 @@ obj-$(CONFIG_GPIO_MAX7301)   += max7301.o
 obj-$(CONFIG_GPIO_MAX732X)     += max732x.o
 obj-$(CONFIG_GPIO_MC33880)     += mc33880.o
 obj-$(CONFIG_GPIO_MCP23S08)    += mcp23s08.o
+obj-$(CONFIG_GPIO_74X164)      += 74x164.o
 obj-$(CONFIG_GPIO_PCA953X)     += pca953x.o
 obj-$(CONFIG_GPIO_PCF857X)     += pcf857x.o
+obj-$(CONFIG_GPIO_PCH)         += pch_gpio.o
 obj-$(CONFIG_GPIO_PL061)       += pl061.o
 obj-$(CONFIG_GPIO_STMPE)       += stmpe-gpio.o
 obj-$(CONFIG_GPIO_TC35892)     += tc35892-gpio.o
index 2e8e9e2..0871f78 100644
@@ -1,8 +1,8 @@
 /*
  * GPIO Chip driver for Analog Devices
- * ADP5588 I/O Expander and QWERTY Keypad Controller
+ * ADP5588/ADP5587 I/O Expander and QWERTY Keypad Controller
  *
- * Copyright 2009 Analog Devices Inc.
+ * Copyright 2009-2010 Analog Devices Inc.
  *
  * Licensed under the GPL-2 or later.
  */
 #include <linux/init.h>
 #include <linux/i2c.h>
 #include <linux/gpio.h>
+#include <linux/interrupt.h>
+#include <linux/irq.h>
 
 #include <linux/i2c/adp5588.h>
 
-#define DRV_NAME               "adp5588-gpio"
-#define MAXGPIO                        18
-#define ADP_BANK(offs)         ((offs) >> 3)
-#define ADP_BIT(offs)          (1u << ((offs) & 0x7))
+#define DRV_NAME       "adp5588-gpio"
+
+/*
+ * Early pre 4.0 Silicon required to delay readout by at least 25ms,
+ * since the Event Counter Register updated 25ms after the interrupt
+ * asserted.
+ */
+#define WA_DELAYED_READOUT_REVID(rev)  ((rev) < 4)
 
 struct adp5588_gpio {
        struct i2c_client *client;
        struct gpio_chip gpio_chip;
        struct mutex lock;      /* protect cached dir, dat_out */
+       /* protect serialized access to the interrupt controller bus */
+       struct mutex irq_lock;
        unsigned gpio_start;
+       unsigned irq_base;
        uint8_t dat_out[3];
        uint8_t dir[3];
+       uint8_t int_lvl[3];
+       uint8_t int_en[3];
+       uint8_t irq_mask[3];
+       uint8_t irq_stat[3];
 };
 
 static int adp5588_gpio_read(struct i2c_client *client, u8 reg)
@@ -55,8 +68,8 @@ static int adp5588_gpio_get_value(struct gpio_chip *chip, unsigned off)
        struct adp5588_gpio *dev =
            container_of(chip, struct adp5588_gpio, gpio_chip);
 
-       return !!(adp5588_gpio_read(dev->client, GPIO_DAT_STAT1 + ADP_BANK(off))
-                 & ADP_BIT(off));
+       return !!(adp5588_gpio_read(dev->client,
+                 GPIO_DAT_STAT1 + ADP5588_BANK(off)) & ADP5588_BIT(off));
 }
 
 static void adp5588_gpio_set_value(struct gpio_chip *chip,
@@ -66,8 +79,8 @@ static void adp5588_gpio_set_value(struct gpio_chip *chip,
        struct adp5588_gpio *dev =
            container_of(chip, struct adp5588_gpio, gpio_chip);
 
-       bank = ADP_BANK(off);
-       bit = ADP_BIT(off);
+       bank = ADP5588_BANK(off);
+       bit = ADP5588_BIT(off);
 
        mutex_lock(&dev->lock);
        if (val)
@@ -87,10 +100,10 @@ static int adp5588_gpio_direction_input(struct gpio_chip *chip, unsigned off)
        struct adp5588_gpio *dev =
            container_of(chip, struct adp5588_gpio, gpio_chip);
 
-       bank = ADP_BANK(off);
+       bank = ADP5588_BANK(off);
 
        mutex_lock(&dev->lock);
-       dev->dir[bank] &= ~ADP_BIT(off);
+       dev->dir[bank] &= ~ADP5588_BIT(off);
        ret = adp5588_gpio_write(dev->client, GPIO_DIR1 + bank, dev->dir[bank]);
        mutex_unlock(&dev->lock);
 
@@ -105,8 +118,8 @@ static int adp5588_gpio_direction_output(struct gpio_chip *chip,
        struct adp5588_gpio *dev =
            container_of(chip, struct adp5588_gpio, gpio_chip);
 
-       bank = ADP_BANK(off);
-       bit = ADP_BIT(off);
+       bank = ADP5588_BANK(off);
+       bit = ADP5588_BIT(off);
 
        mutex_lock(&dev->lock);
        dev->dir[bank] |= bit;
@@ -125,6 +138,213 @@ static int adp5588_gpio_direction_output(struct gpio_chip *chip,
        return ret;
 }
 
+#ifdef CONFIG_GPIO_ADP5588_IRQ
+static int adp5588_gpio_to_irq(struct gpio_chip *chip, unsigned off)
+{
+       struct adp5588_gpio *dev =
+               container_of(chip, struct adp5588_gpio, gpio_chip);
+       return dev->irq_base + off;
+}
+
+static void adp5588_irq_bus_lock(unsigned int irq)
+{
+       struct adp5588_gpio *dev = get_irq_chip_data(irq);
+       mutex_lock(&dev->irq_lock);
+}
+
+ /*
+  * genirq core code can issue chip->mask/unmask from atomic context.
+  * This doesn't work for slow busses where an access needs to sleep.
+  * bus_sync_unlock() is therefore called outside the atomic context,
+  * syncs the current irq mask state with the slow external controller
+  * and unlocks the bus.
+  */
+
+static void adp5588_irq_bus_sync_unlock(unsigned int irq)
+{
+       struct adp5588_gpio *dev = get_irq_chip_data(irq);
+       int i;
+
+       for (i = 0; i <= ADP5588_BANK(ADP5588_MAXGPIO); i++)
+               if (dev->int_en[i] ^ dev->irq_mask[i]) {
+                       dev->int_en[i] = dev->irq_mask[i];
+                       adp5588_gpio_write(dev->client, GPIO_INT_EN1 + i,
+                                          dev->int_en[i]);
+               }
+
+       mutex_unlock(&dev->irq_lock);
+}
+
+static void adp5588_irq_mask(unsigned int irq)
+{
+       struct adp5588_gpio *dev = get_irq_chip_data(irq);
+       unsigned gpio = irq - dev->irq_base;
+
+       dev->irq_mask[ADP5588_BANK(gpio)] &= ~ADP5588_BIT(gpio);
+}
+
+static void adp5588_irq_unmask(unsigned int irq)
+{
+       struct adp5588_gpio *dev = get_irq_chip_data(irq);
+       unsigned gpio = irq - dev->irq_base;
+
+       dev->irq_mask[ADP5588_BANK(gpio)] |= ADP5588_BIT(gpio);
+}
+
+static int adp5588_irq_set_type(unsigned int irq, unsigned int type)
+{
+       struct adp5588_gpio *dev = get_irq_chip_data(irq);
+       uint16_t gpio = irq - dev->irq_base;
+       unsigned bank, bit;
+
+       if ((type & IRQ_TYPE_EDGE_BOTH)) {
+               dev_err(&dev->client->dev, "irq %d: unsupported type %d\n",
+                       irq, type);
+               return -EINVAL;
+       }
+
+       bank = ADP5588_BANK(gpio);
+       bit = ADP5588_BIT(gpio);
+
+       if (type & IRQ_TYPE_LEVEL_HIGH)
+               dev->int_lvl[bank] |= bit;
+       else if (type & IRQ_TYPE_LEVEL_LOW)
+               dev->int_lvl[bank] &= ~bit;
+       else
+               return -EINVAL;
+
+       adp5588_gpio_direction_input(&dev->gpio_chip, gpio);
+       adp5588_gpio_write(dev->client, GPIO_INT_LVL1 + bank,
+                          dev->int_lvl[bank]);
+
+       return 0;
+}
+
+static struct irq_chip adp5588_irq_chip = {
+       .name                   = "adp5588",
+       .mask                   = adp5588_irq_mask,
+       .unmask                 = adp5588_irq_unmask,
+       .bus_lock               = adp5588_irq_bus_lock,
+       .bus_sync_unlock        = adp5588_irq_bus_sync_unlock,
+       .set_type               = adp5588_irq_set_type,
+};
+
+static int adp5588_gpio_read_intstat(struct i2c_client *client, u8 *buf)
+{
+       int ret = i2c_smbus_read_i2c_block_data(client, GPIO_INT_STAT1, 3, buf);
+
+       if (ret < 0)
+               dev_err(&client->dev, "Read INT_STAT Error\n");
+
+       return ret;
+}
+
+static irqreturn_t adp5588_irq_handler(int irq, void *devid)
+{
+       struct adp5588_gpio *dev = devid;
+       unsigned status, bank, bit, pending;
+       int ret;
+       status = adp5588_gpio_read(dev->client, INT_STAT);
+
+       if (status & ADP5588_GPI_INT) {
+               ret = adp5588_gpio_read_intstat(dev->client, dev->irq_stat);
+               if (ret < 0)
+                       memset(dev->irq_stat, 0, ARRAY_SIZE(dev->irq_stat));
+
+               for (bank = 0; bank <= ADP5588_BANK(ADP5588_MAXGPIO);
+                       bank++, bit = 0) {
+                       pending = dev->irq_stat[bank] & dev->irq_mask[bank];
+
+                       while (pending) {
+                               if (pending & (1 << bit)) {
+                                       handle_nested_irq(dev->irq_base +
+                                                         (bank << 3) + bit);
+                                       pending &= ~(1 << bit);
+
+                               }
+                               bit++;
+                       }
+               }
+       }
+
+       adp5588_gpio_write(dev->client, INT_STAT, status); /* Status is W1C */
+
+       return IRQ_HANDLED;
+}
+
+static int adp5588_irq_setup(struct adp5588_gpio *dev)
+{
+       struct i2c_client *client = dev->client;
+       struct adp5588_gpio_platform_data *pdata = client->dev.platform_data;
+       unsigned gpio;
+       int ret;
+
+       adp5588_gpio_write(client, CFG, ADP5588_AUTO_INC);
+       adp5588_gpio_write(client, INT_STAT, -1); /* status is W1C */
+       adp5588_gpio_read_intstat(client, dev->irq_stat); /* read to clear */
+
+       dev->irq_base = pdata->irq_base;
+       mutex_init(&dev->irq_lock);
+
+       for (gpio = 0; gpio < dev->gpio_chip.ngpio; gpio++) {
+               int irq = gpio + dev->irq_base;
+               set_irq_chip_data(irq, dev);
+               set_irq_chip_and_handler(irq, &adp5588_irq_chip,
+                                        handle_level_irq);
+               set_irq_nested_thread(irq, 1);
+#ifdef CONFIG_ARM
+               /*
+                * ARM needs us to explicitly flag the IRQ as VALID,
+                * once we do so, it will also set the noprobe.
+                */
+               set_irq_flags(irq, IRQF_VALID);
+#else
+               set_irq_noprobe(irq);
+#endif
+       }
+
+       ret = request_threaded_irq(client->irq,
+                                  NULL,
+                                  adp5588_irq_handler,
+                                  IRQF_TRIGGER_FALLING | IRQF_ONESHOT,
+                                  dev_name(&client->dev), dev);
+       if (ret) {
+               dev_err(&client->dev, "failed to request irq %d\n",
+                       client->irq);
+               goto out;
+       }
+
+       dev->gpio_chip.to_irq = adp5588_gpio_to_irq;
+       adp5588_gpio_write(client, CFG,
+               ADP5588_AUTO_INC | ADP5588_INT_CFG | ADP5588_GPI_INT);
+
+       return 0;
+
+out:
+       dev->irq_base = 0;
+       return ret;
+}
+
+static void adp5588_irq_teardown(struct adp5588_gpio *dev)
+{
+       if (dev->irq_base)
+               free_irq(dev->client->irq, dev);
+}
+
+#else
+static int adp5588_irq_setup(struct adp5588_gpio *dev)
+{
+       struct i2c_client *client = dev->client;
+       dev_warn(&client->dev, "interrupt support not compiled in\n");
+
+       return 0;
+}
+
+static void adp5588_irq_teardown(struct adp5588_gpio *dev)
+{
+}
+#endif /* CONFIG_GPIO_ADP5588_IRQ */
+
 static int __devinit adp5588_gpio_probe(struct i2c_client *client,
                                        const struct i2c_device_id *id)
 {
@@ -160,37 +380,46 @@ static int __devinit adp5588_gpio_probe(struct i2c_client *client,
        gc->can_sleep = 1;
 
        gc->base = pdata->gpio_start;
-       gc->ngpio = MAXGPIO;
+       gc->ngpio = ADP5588_MAXGPIO;
        gc->label = client->name;
        gc->owner = THIS_MODULE;
 
        mutex_init(&dev->lock);
 
-
        ret = adp5588_gpio_read(dev->client, DEV_ID);
        if (ret < 0)
                goto err;
 
        revid = ret & ADP5588_DEVICE_ID_MASK;
 
-       for (i = 0, ret = 0; i <= ADP_BANK(MAXGPIO); i++) {
+       for (i = 0, ret = 0; i <= ADP5588_BANK(ADP5588_MAXGPIO); i++) {
                dev->dat_out[i] = adp5588_gpio_read(client, GPIO_DAT_OUT1 + i);
                dev->dir[i] = adp5588_gpio_read(client, GPIO_DIR1 + i);
                ret |= adp5588_gpio_write(client, KP_GPIO1 + i, 0);
                ret |= adp5588_gpio_write(client, GPIO_PULL1 + i,
                                (pdata->pullup_dis_mask >> (8 * i)) & 0xFF);
-
+               ret |= adp5588_gpio_write(client, GPIO_INT_EN1 + i, 0);
                if (ret)
                        goto err;
        }
 
+       if (pdata->irq_base) {
+               if (WA_DELAYED_READOUT_REVID(revid)) {
+                       dev_warn(&client->dev, "GPIO int not supported\n");
+               } else {
+                       ret = adp5588_irq_setup(dev);
+                       if (ret)
+                               goto err;
+               }
+       }
+
        ret = gpiochip_add(&dev->gpio_chip);
        if (ret)
-               goto err;
+               goto err_irq;
 
-       dev_info(&client->dev, "gpios %d..%d on a %s Rev. %d\n",
+       dev_info(&client->dev, "gpios %d..%d (IRQ Base %d) on a %s Rev. %d\n",
                        gc->base, gc->base + gc->ngpio - 1,
-                       client->name, revid);
+                       pdata->irq_base, client->name, revid);
 
        if (pdata->setup) {
                ret = pdata->setup(client, gc->base, gc->ngpio, pdata->context);
@@ -199,8 +428,11 @@ static int __devinit adp5588_gpio_probe(struct i2c_client *client,
        }
 
        i2c_set_clientdata(client, dev);
+
        return 0;
 
+err_irq:
+       adp5588_irq_teardown(dev);
 err:
        kfree(dev);
        return ret;
@@ -222,6 +454,9 @@ static int __devexit adp5588_gpio_remove(struct i2c_client *client)
                }
        }
 
+       if (dev->irq_base)
+               free_irq(dev->client->irq, dev);
+
        ret = gpiochip_remove(&dev->gpio_chip);
        if (ret) {
                dev_err(&client->dev, "gpiochip_remove failed %d\n", ret);
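A hedged board-side sketch (not from the patch) of how the ADP5588 interrupt support above would be wired up, assuming GPIO_ADP5588=y and GPIO_ADP5588_IRQ are enabled per the Kconfig hunk earlier. The gpio_start, irq_base and pullup_dis_mask fields are the ones read by the probe and adp5588_irq_setup() code in this hunk, and "adp5588-gpio" matches DRV_NAME above; the I2C bus, slave address, host IRQ and numeric bases are invented examples.

#include <linux/init.h>
#include <linux/i2c.h>
#include <linux/i2c/adp5588.h>

static struct adp5588_gpio_platform_data board_adp5588_gpio = {
        .gpio_start      = 64,          /* example GPIO base for the expander */
        .irq_base        = 160,         /* example IRQ base for the GPI interrupts */
        .pullup_dis_mask = 0,           /* keep all internal pull-ups enabled */
};

static struct i2c_board_info board_i2c_devs[] __initdata = {
        {
                I2C_BOARD_INFO("adp5588-gpio", 0x34),   /* example slave address */
                .irq           = 25,                    /* example host interrupt line */
                .platform_data = &board_adp5588_gpio,
        },
};

/* registered from board init code, e.g.:
 *     i2c_register_board_info(0, board_i2c_devs, ARRAY_SIZE(board_i2c_devs));
 */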
diff --git a/drivers/gpio/basic_mmio_gpio.c b/drivers/gpio/basic_mmio_gpio.c
new file mode 100644
index 0000000..3addea6
--- /dev/null
@@ -0,0 +1,297 @@
+/*
+ * Driver for basic memory-mapped GPIO controllers.
+ *
+ * Copyright 2008 MontaVista Software, Inc.
+ * Copyright 2008,2010 Anton Vorontsov <cbouatmailru@gmail.com>
+ *
+ * This program is free software; you can redistribute  it and/or modify it
+ * under  the terms of  the GNU General  Public License as published by the
+ * Free Software Foundation;  either version 2 of the  License, or (at your
+ * option) any later version.
+ *
+ * ....``.```~~~~````.`.`.`.`.```````'',,,.........`````......`.......
+ * ...``                                                         ```````..
+ * ..The simplest form of a GPIO controller that the driver supports is``
+ *  `.just a single "data" register, where GPIO state can be read and/or `
+ *    `,..written. ,,..``~~~~ .....``.`.`.~~.```.`.........``````.```````
+ *        `````````
+                                    ___
+_/~~|___/~|   . ```~~~~~~       ___/___\___     ,~.`.`.`.`````.~~...,,,,...
+__________|~$@~~~        %~    /o*o*o*o*o*o\   .. Implementing such a GPIO .
+o        `                     ~~~~\___/~~~~    ` controller in FPGA is ,.`
+                                                 `....trivial..'~`.```.```
+ *                                                    ```````
+ *  .```````~~~~`..`.``.``.
+ * .  The driver supports  `...       ,..```.`~~~```````````````....````.``,,
+ * .   big-endian notation, just`.  .. A bit more sophisticated controllers ,
+ *  . register the device with -be`. .with a pair of set/clear-bit registers ,
+ *   `.. suffix.  ```~~`````....`.`   . affecting the data register and the .`
+ *     ``.`.``...```                  ```.. output pins are also supported.`
+ *                        ^^             `````.`````````.,``~``~``~~``````
+ *                                                   .                  ^^
+ *   ,..`.`.`...````````````......`.`.`.`.`.`..`.`.`..
+ * .. The expectation is that in at least some cases .    ,-~~~-,
+ *  .this will be used with roll-your-own ASIC/FPGA .`     \   /
+ *  .logic in Verilog or VHDL. ~~~`````````..`````~~`       \ /
+ *  ..````````......```````````                             \o_
+ *                                                           |
+ *                              ^^                          / \
+ *
+ *           ...`````~~`.....``.`..........``````.`.``.```........``.
+ *            `  8, 16, 32 and 64 bits registers are supported, and``.
+ *            . the number of GPIOs is determined by the width of   ~
+ *             .. the registers. ,............```.`.`..`.`.~~~.`.`.`~
+ *               `.......````.```
+ */
+
+#include <linux/init.h>
+#include <linux/bug.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/spinlock.h>
+#include <linux/compiler.h>
+#include <linux/types.h>
+#include <linux/errno.h>
+#include <linux/log2.h>
+#include <linux/ioport.h>
+#include <linux/io.h>
+#include <linux/gpio.h>
+#include <linux/slab.h>
+#include <linux/platform_device.h>
+#include <linux/mod_devicetable.h>
+#include <linux/basic_mmio_gpio.h>
+
+struct bgpio_chip {
+       struct gpio_chip gc;
+       void __iomem *reg_dat;
+       void __iomem *reg_set;
+       void __iomem *reg_clr;
+
+       /* Number of bits (GPIOs): <register width> * 8. */
+       int bits;
+
+       /*
+        * Some GPIO controllers work with the big-endian bits notation,
+        * e.g. in a 8-bits register, GPIO7 is the least significant bit.
+        */
+       int big_endian_bits;
+
+       /*
+        * Used to lock bgpio_chip->data. Also, this is needed to keep
+        * shadowed and real data registers writes together.
+        */
+       spinlock_t lock;
+
+       /* Shadowed data register to clear/set bits safely. */
+       unsigned long data;
+};
+
+static struct bgpio_chip *to_bgpio_chip(struct gpio_chip *gc)
+{
+       return container_of(gc, struct bgpio_chip, gc);
+}
+
+static unsigned long bgpio_in(struct bgpio_chip *bgc)
+{
+       switch (bgc->bits) {
+       case 8:
+               return __raw_readb(bgc->reg_dat);
+       case 16:
+               return __raw_readw(bgc->reg_dat);
+       case 32:
+               return __raw_readl(bgc->reg_dat);
+#if BITS_PER_LONG >= 64
+       case 64:
+               return __raw_readq(bgc->reg_dat);
+#endif
+       }
+       return -EINVAL;
+}
+
+static void bgpio_out(struct bgpio_chip *bgc, void __iomem *reg,
+                     unsigned long data)
+{
+       switch (bgc->bits) {
+       case 8:
+               __raw_writeb(data, reg);
+               return;
+       case 16:
+               __raw_writew(data, reg);
+               return;
+       case 32:
+               __raw_writel(data, reg);
+               return;
+#if BITS_PER_LONG >= 64
+       case 64:
+               __raw_writeq(data, reg);
+               return;
+#endif
+       }
+}
+
+static unsigned long bgpio_pin2mask(struct bgpio_chip *bgc, unsigned int pin)
+{
+       if (bgc->big_endian_bits)
+               return 1 << (bgc->bits - 1 - pin);
+       else
+               return 1 << pin;
+}
+
+static int bgpio_get(struct gpio_chip *gc, unsigned int gpio)
+{
+       struct bgpio_chip *bgc = to_bgpio_chip(gc);
+
+       return bgpio_in(bgc) & bgpio_pin2mask(bgc, gpio);
+}
+
+static void bgpio_set(struct gpio_chip *gc, unsigned int gpio, int val)
+{
+       struct bgpio_chip *bgc = to_bgpio_chip(gc);
+       unsigned long mask = bgpio_pin2mask(bgc, gpio);
+       unsigned long flags;
+
+       if (bgc->reg_set) {
+               if (val)
+                       bgpio_out(bgc, bgc->reg_set, mask);
+               else
+                       bgpio_out(bgc, bgc->reg_clr, mask);
+               return;
+       }
+
+       spin_lock_irqsave(&bgc->lock, flags);
+
+       if (val)
+               bgc->data |= mask;
+       else
+               bgc->data &= ~mask;
+
+       bgpio_out(bgc, bgc->reg_dat, bgc->data);
+
+       spin_unlock_irqrestore(&bgc->lock, flags);
+}
+
+static int bgpio_dir_in(struct gpio_chip *gc, unsigned int gpio)
+{
+       return 0;
+}
+
+static int bgpio_dir_out(struct gpio_chip *gc, unsigned int gpio, int val)
+{
+       bgpio_set(gc, gpio, val);
+       return 0;
+}
+
+static int __devinit bgpio_probe(struct platform_device *pdev)
+{
+       const struct platform_device_id *platid = platform_get_device_id(pdev);
+       struct device *dev = &pdev->dev;
+       struct bgpio_pdata *pdata = dev_get_platdata(dev);
+       struct bgpio_chip *bgc;
+       struct resource *res_dat;
+       struct resource *res_set;
+       struct resource *res_clr;
+       resource_size_t dat_sz;
+       int bits;
+       int ret;
+
+       res_dat = platform_get_resource_byname(pdev, IORESOURCE_MEM, "dat");
+       if (!res_dat)
+               return -EINVAL;
+
+       dat_sz = resource_size(res_dat);
+       if (!is_power_of_2(dat_sz))
+               return -EINVAL;
+
+       bits = dat_sz * 8;
+       if (bits > BITS_PER_LONG)
+               return -EINVAL;
+
+       bgc = devm_kzalloc(dev, sizeof(*bgc), GFP_KERNEL);
+       if (!bgc)
+               return -ENOMEM;
+
+       bgc->reg_dat = devm_ioremap(dev, res_dat->start, dat_sz);
+       if (!bgc->reg_dat)
+               return -ENOMEM;
+
+       res_set = platform_get_resource_byname(pdev, IORESOURCE_MEM, "set");
+       res_clr = platform_get_resource_byname(pdev, IORESOURCE_MEM, "clr");
+       if (res_set && res_clr) {
+               if (resource_size(res_set) != resource_size(res_clr) ||
+                               resource_size(res_set) != dat_sz)
+                       return -EINVAL;
+
+               bgc->reg_set = devm_ioremap(dev, res_set->start, dat_sz);
+               bgc->reg_clr = devm_ioremap(dev, res_clr->start, dat_sz);
+               if (!bgc->reg_set || !bgc->reg_clr)
+                       return -ENOMEM;
+       } else if (res_set || res_clr) {
+               return -EINVAL;
+       }
+
+       spin_lock_init(&bgc->lock);
+
+       bgc->bits = bits;
+       bgc->big_endian_bits = !strcmp(platid->name, "basic-mmio-gpio-be");
+       bgc->data = bgpio_in(bgc);
+
+       bgc->gc.ngpio = bits;
+       bgc->gc.direction_input = bgpio_dir_in;
+       bgc->gc.direction_output = bgpio_dir_out;
+       bgc->gc.get = bgpio_get;
+       bgc->gc.set = bgpio_set;
+       bgc->gc.dev = dev;
+       bgc->gc.label = dev_name(dev);
+
+       if (pdata)
+               bgc->gc.base = pdata->base;
+       else
+               bgc->gc.base = -1;
+
+       dev_set_drvdata(dev, bgc);
+
+       ret = gpiochip_add(&bgc->gc);
+       if (ret)
+               dev_err(dev, "gpiochip_add() failed: %d\n", ret);
+
+       return ret;
+}
+
+static int __devexit bgpio_remove(struct platform_device *pdev)
+{
+       struct bgpio_chip *bgc = dev_get_drvdata(&pdev->dev);
+
+       return gpiochip_remove(&bgc->gc);
+}
+
+static const struct platform_device_id bgpio_id_table[] = {
+       { "basic-mmio-gpio", },
+       { "basic-mmio-gpio-be", },
+       {},
+};
+MODULE_DEVICE_TABLE(platform, bgpio_id_table);
+
+static struct platform_driver bgpio_driver = {
+       .driver = {
+               .name = "basic-mmio-gpio",
+       },
+       .id_table = bgpio_id_table,
+       .probe = bgpio_probe,
+       .remove = __devexit_p(bgpio_remove),
+};
+
+static int __init bgpio_init(void)
+{
+       return platform_driver_register(&bgpio_driver);
+}
+module_init(bgpio_init);
+
+static void __exit bgpio_exit(void)
+{
+       platform_driver_unregister(&bgpio_driver);
+}
+module_exit(bgpio_exit);
+
+MODULE_DESCRIPTION("Driver for basic memory-mapped GPIO controllers");
+MODULE_AUTHOR("Anton Vorontsov <cbouatmailru@gmail.com>");
+MODULE_LICENSE("GPL");
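For context, a minimal sketch (not part of the patch) of how a platform might instantiate this driver: the "dat" resource name, the "basic-mmio-gpio"/"basic-mmio-gpio-be" device names and the bgpio_pdata base field are taken from the probe code above, while the register address and size are invented examples (a 4-byte data register yields 32 GPIOs).

#include <linux/init.h>
#include <linux/ioport.h>
#include <linux/platform_device.h>
#include <linux/basic_mmio_gpio.h>

static struct resource board_bgpio_res[] = {
        {
                .name  = "dat",                 /* single read/write data register */
                .start = 0xfe001000,            /* example physical address */
                .end   = 0xfe001003,            /* 4 bytes wide -> 32 GPIOs */
                .flags = IORESOURCE_MEM,
        },
};

static struct bgpio_pdata board_bgpio_pdata = {
        .base = -1,                             /* let gpiolib pick the GPIO base */
};

static struct platform_device board_bgpio_dev = {
        .name           = "basic-mmio-gpio",    /* use "basic-mmio-gpio-be" for big-endian bits */
        .id             = -1,
        .resource       = board_bgpio_res,
        .num_resources  = ARRAY_SIZE(board_bgpio_res),
        .dev            = {
                .platform_data = &board_bgpio_pdata,
        },
};

/* registered from board/platform setup code, e.g.:
 *     platform_device_register(&board_bgpio_dev);
 */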
index 8383a8d..64db9dc 100644
 /* Supports:
  * Moorestown platform Langwell chip.
  * Medfield platform Penwell chip.
+ * Whitney point.
  */
 
 #include <linux/module.h>
 #include <linux/pci.h>
+#include <linux/platform_device.h>
 #include <linux/kernel.h>
 #include <linux/delay.h>
 #include <linux/stddef.h>
@@ -158,15 +160,15 @@ static int lnw_irq_type(unsigned irq, unsigned type)
        spin_unlock_irqrestore(&lnw->lock, flags);
 
        return 0;
-};
+}
 
 static void lnw_irq_unmask(unsigned irq)
 {
-};
+}
 
 static void lnw_irq_mask(unsigned irq)
 {
-};
+}
 
 static struct irq_chip lnw_irqchip = {
        .name           = "LNW-GPIO",
@@ -300,9 +302,88 @@ static struct pci_driver lnw_gpio_driver = {
        .probe          = lnw_gpio_probe,
 };
 
+
+static int __devinit wp_gpio_probe(struct platform_device *pdev)
+{
+       struct lnw_gpio *lnw;
+       struct gpio_chip *gc;
+       struct resource *rc;
+       int retval = 0;
+
+       rc = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+       if (!rc)
+               return -EINVAL;
+
+       lnw = kzalloc(sizeof(struct lnw_gpio), GFP_KERNEL);
+       if (!lnw) {
+               dev_err(&pdev->dev,
+                       "can't allocate whitneypoint_gpio chip data\n");
+               return -ENOMEM;
+       }
+       lnw->reg_base = ioremap_nocache(rc->start, resource_size(rc));
+       if (lnw->reg_base == NULL) {
+               retval = -EINVAL;
+               goto err_kmalloc;
+       }
+       spin_lock_init(&lnw->lock);
+       gc = &lnw->chip;
+       gc->label = dev_name(&pdev->dev);
+       gc->owner = THIS_MODULE;
+       gc->direction_input = lnw_gpio_direction_input;
+       gc->direction_output = lnw_gpio_direction_output;
+       gc->get = lnw_gpio_get;
+       gc->set = lnw_gpio_set;
+       gc->to_irq = NULL;
+       gc->base = 0;
+       gc->ngpio = 64;
+       gc->can_sleep = 0;
+       retval = gpiochip_add(gc);
+       if (retval) {
+               dev_err(&pdev->dev, "whitneypoint gpiochip_add error %d\n",
+                                                               retval);
+               goto err_ioremap;
+       }
+       platform_set_drvdata(pdev, lnw);
+       return 0;
+err_ioremap:
+       iounmap(lnw->reg_base);
+err_kmalloc:
+       kfree(lnw);
+       return retval;
+}
+
+static int __devexit wp_gpio_remove(struct platform_device *pdev)
+{
+       struct lnw_gpio *lnw = platform_get_drvdata(pdev);
+       int err;
+       err = gpiochip_remove(&lnw->chip);
+       if (err)
+               dev_err(&pdev->dev, "failed to remove gpio_chip.\n");
+       iounmap(lnw->reg_base);
+       kfree(lnw);
+       platform_set_drvdata(pdev, NULL);
+       return 0;
+}
+
+static struct platform_driver wp_gpio_driver = {
+       .probe          = wp_gpio_probe,
+       .remove         = __devexit_p(wp_gpio_remove),
+       .driver         = {
+               .name   = "wp_gpio",
+               .owner  = THIS_MODULE,
+       },
+};
+
 static int __init lnw_gpio_init(void)
 {
-       return pci_register_driver(&lnw_gpio_driver);
+       int ret;
+       ret =  pci_register_driver(&lnw_gpio_driver);
+       if (ret < 0)
+               return ret;
+       ret = platform_driver_register(&wp_gpio_driver);
+       if (ret < 0)
+               pci_unregister_driver(&lnw_gpio_driver);
+       return ret;
 }
 
 device_initcall(lnw_gpio_init);
diff --git a/drivers/gpio/pch_gpio.c b/drivers/gpio/pch_gpio.c
new file mode 100644
index 0000000..0eba0a7
--- /dev/null
@@ -0,0 +1,312 @@
+/*
+ * Copyright (C) 2010 OKI SEMICONDUCTOR Co., LTD.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
+ */
+#include <linux/kernel.h>
+#include <linux/pci.h>
+#include <linux/gpio.h>
+
+#define PCH_GPIO_ALL_PINS      0xfff /* Mask for GPIO pins 0 to 11 */
+#define GPIO_NUM_PINS  12      /* Specifies number of GPIO PINS GPIO0-GPIO11 */
+
+struct pch_regs {
+       u32     ien;
+       u32     istatus;
+       u32     idisp;
+       u32     iclr;
+       u32     imask;
+       u32     imaskclr;
+       u32     po;
+       u32     pi;
+       u32     pm;
+       u32     im0;
+       u32     im1;
+       u32     reserved[4];
+       u32     reset;
+};
+
+/**
+ * struct pch_gpio_reg_data - The register store data.
+ * @po_reg:    To store contents of PO register.
+ * @pm_reg:    To store contents of PM register.
+ */
+struct pch_gpio_reg_data {
+       u32 po_reg;
+       u32 pm_reg;
+};
+
+/**
+ * struct pch_gpio - GPIO private data structure.
+ * @base:                      PCI base address of Memory mapped I/O register.
+ * @reg:                       Memory mapped PCH GPIO register list.
+ * @dev:                       Pointer to device structure.
+ * @gpio:                      Data for GPIO infrastructure.
+ * @pch_gpio_reg:              Memory mapped Register data is saved here
+ *                             when suspend.
+ */
+struct pch_gpio {
+       void __iomem *base;
+       struct pch_regs __iomem *reg;
+       struct device *dev;
+       struct gpio_chip gpio;
+       struct pch_gpio_reg_data pch_gpio_reg;
+       struct mutex lock;
+};
+
+static void pch_gpio_set(struct gpio_chip *gpio, unsigned nr, int val)
+{
+       u32 reg_val;
+       struct pch_gpio *chip = container_of(gpio, struct pch_gpio, gpio);
+
+       mutex_lock(&chip->lock);
+       reg_val = ioread32(&chip->reg->po);
+       if (val)
+               reg_val |= (1 << nr);
+       else
+               reg_val &= ~(1 << nr);
+
+       iowrite32(reg_val, &chip->reg->po);
+       mutex_unlock(&chip->lock);
+}
+
+static int pch_gpio_get(struct gpio_chip *gpio, unsigned nr)
+{
+       struct pch_gpio *chip = container_of(gpio, struct pch_gpio, gpio);
+
+       return ioread32(&chip->reg->pi) & (1 << nr);
+}
+
+static int pch_gpio_direction_output(struct gpio_chip *gpio, unsigned nr,
+                                    int val)
+{
+       struct pch_gpio *chip = container_of(gpio, struct pch_gpio, gpio);
+       u32 pm;
+       u32 reg_val;
+
+       mutex_lock(&chip->lock);
+       pm = ioread32(&chip->reg->pm) & PCH_GPIO_ALL_PINS;
+       pm |= (1 << nr);
+       iowrite32(pm, &chip->reg->pm);
+
+       reg_val = ioread32(&chip->reg->po);
+       if (val)
+               reg_val |= (1 << nr);
+       else
+               reg_val &= ~(1 << nr);
+
+       iowrite32(reg_val, &chip->reg->po);
+       mutex_unlock(&chip->lock);
+
+       return 0;
+}
+
+static int pch_gpio_direction_input(struct gpio_chip *gpio, unsigned nr)
+{
+       struct pch_gpio *chip = container_of(gpio, struct pch_gpio, gpio);
+       u32 pm;
+
+       mutex_lock(&chip->lock);
+       pm = ioread32(&chip->reg->pm) & PCH_GPIO_ALL_PINS; /*bits 0-11*/
+       pm &= ~(1 << nr);
+       iowrite32(pm, &chip->reg->pm);
+       mutex_unlock(&chip->lock);
+
+       return 0;
+}
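Since pch_gpio registers with gpiolib (see gpiochip_add() further down), consumers never call these methods directly; they go through the generic GPIO API. A minimal usage sketch follows; the GPIO number is a placeholder, because the chip asks for a dynamic base (gpio->base = -1) and the real number is only known at runtime.

#include <linux/gpio.h>
#include <linux/kernel.h>

#define PCH_EXAMPLE_GPIO	256	/* placeholder: depends on the dynamic base */

static int pch_gpio_example(void)
{
	int err, val;

	err = gpio_request(PCH_EXAMPLE_GPIO, "pch-example");
	if (err)
		return err;

	/* Ends up in pch_gpio_direction_output() above. */
	err = gpio_direction_output(PCH_EXAMPLE_GPIO, 1);
	if (err)
		goto out;

	/* Reads the PI register through pch_gpio_get(). */
	val = gpio_get_value(PCH_EXAMPLE_GPIO);
	pr_info("pch example gpio reads %d\n", val);
out:
	gpio_free(PCH_EXAMPLE_GPIO);
	return err;
}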
+
+/*
+ * Save register configuration and disable interrupts.
+ */
+static void pch_gpio_save_reg_conf(struct pch_gpio *chip)
+{
+       chip->pch_gpio_reg.po_reg = ioread32(&chip->reg->po);
+       chip->pch_gpio_reg.pm_reg = ioread32(&chip->reg->pm);
+}
+
+/*
+ * This function restores the register configuration of the GPIO device.
+ */
+static void pch_gpio_restore_reg_conf(struct pch_gpio *chip)
+{
+       /* restore the saved PO register contents */
+       iowrite32(chip->pch_gpio_reg.po_reg, &chip->reg->po);
+       /* restore the saved PM register contents */
+       iowrite32(chip->pch_gpio_reg.pm_reg, &chip->reg->pm);
+}
+
+static void pch_gpio_setup(struct pch_gpio *chip)
+{
+       struct gpio_chip *gpio = &chip->gpio;
+
+       gpio->label = dev_name(chip->dev);
+       gpio->owner = THIS_MODULE;
+       gpio->direction_input = pch_gpio_direction_input;
+       gpio->get = pch_gpio_get;
+       gpio->direction_output = pch_gpio_direction_output;
+       gpio->set = pch_gpio_set;
+       gpio->dbg_show = NULL;
+       gpio->base = -1;
+       gpio->ngpio = GPIO_NUM_PINS;
+       gpio->can_sleep = 0;
+}
+
+static int __devinit pch_gpio_probe(struct pci_dev *pdev,
+                                   const struct pci_device_id *id)
+{
+       s32 ret;
+       struct pch_gpio *chip;
+
+       chip = kzalloc(sizeof(*chip), GFP_KERNEL);
+       if (chip == NULL)
+               return -ENOMEM;
+
+       chip->dev = &pdev->dev;
+       ret = pci_enable_device(pdev);
+       if (ret) {
+               dev_err(&pdev->dev, "%s : pci_enable_device FAILED", __func__);
+               goto err_pci_enable;
+       }
+
+       ret = pci_request_regions(pdev, KBUILD_MODNAME);
+       if (ret) {
+               dev_err(&pdev->dev, "pci_request_regions FAILED-%d", ret);
+               goto err_request_regions;
+       }
+
+       chip->base = pci_iomap(pdev, 1, 0);
+       if (chip->base == NULL) {
+               dev_err(&pdev->dev, "%s : pci_iomap FAILED", __func__);
+               ret = -ENOMEM;
+               goto err_iomap;
+       }
+
+       chip->reg = chip->base;
+       pci_set_drvdata(pdev, chip);
+       mutex_init(&chip->lock);
+       pch_gpio_setup(chip);
+       ret = gpiochip_add(&chip->gpio);
+       if (ret) {
+               dev_err(&pdev->dev, "PCH gpio: Failed to register GPIO\n");
+               goto err_gpiochip_add;
+       }
+
+       return 0;
+
+err_gpiochip_add:
+       pci_iounmap(pdev, chip->base);
+
+err_iomap:
+       pci_release_regions(pdev);
+
+err_request_regions:
+       pci_disable_device(pdev);
+
+err_pci_enable:
+       kfree(chip);
+       dev_err(&pdev->dev, "%s Failed returns %d\n", __func__, ret);
+       return ret;
+}
+
+static void __devexit pch_gpio_remove(struct pci_dev *pdev)
+{
+       int err;
+       struct pch_gpio *chip = pci_get_drvdata(pdev);
+
+       err = gpiochip_remove(&chip->gpio);
+       if (err)
+               dev_err(&pdev->dev, "Failed gpiochip_remove\n");
+
+       pci_iounmap(pdev, chip->base);
+       pci_release_regions(pdev);
+       pci_disable_device(pdev);
+       kfree(chip);
+}
+
+#ifdef CONFIG_PM
+static int pch_gpio_suspend(struct pci_dev *pdev, pm_message_t state)
+{
+       s32 ret;
+       struct pch_gpio *chip = pci_get_drvdata(pdev);
+
+       pch_gpio_save_reg_conf(chip);
+       pch_gpio_restore_reg_conf(chip);
+
+       ret = pci_save_state(pdev);
+       if (ret) {
+               dev_err(&pdev->dev, "pci_save_state Failed-%d\n", ret);
+               return ret;
+       }
+       pci_disable_device(pdev);
+       pci_set_power_state(pdev, PCI_D0);
+       ret = pci_enable_wake(pdev, PCI_D0, 1);
+       if (ret)
+               dev_err(&pdev->dev, "pci_enable_wake Failed -%d\n", ret);
+
+       return 0;
+}
+
+static int pch_gpio_resume(struct pci_dev *pdev)
+{
+       s32 ret;
+       struct pch_gpio *chip = pci_get_drvdata(pdev);
+
+       ret = pci_enable_wake(pdev, PCI_D0, 0);
+
+       pci_set_power_state(pdev, PCI_D0);
+       ret = pci_enable_device(pdev);
+       if (ret) {
+               dev_err(&pdev->dev, "pci_enable_device Failed-%d ", ret);
+               return ret;
+       }
+       pci_restore_state(pdev);
+
+       iowrite32(0x01, &chip->reg->reset);
+       iowrite32(0x00, &chip->reg->reset);
+       pch_gpio_restore_reg_conf(chip);
+
+       return 0;
+}
+#else
+#define pch_gpio_suspend NULL
+#define pch_gpio_resume NULL
+#endif
+
+static DEFINE_PCI_DEVICE_TABLE(pch_gpio_pcidev_id) = {
+       { PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x8803) },
+       { 0, }
+};
+
+static struct pci_driver pch_gpio_driver = {
+       .name = "pch_gpio",
+       .id_table = pch_gpio_pcidev_id,
+       .probe = pch_gpio_probe,
+       .remove = __devexit_p(pch_gpio_remove),
+       .suspend = pch_gpio_suspend,
+       .resume = pch_gpio_resume
+};
+
+static int __init pch_gpio_pci_init(void)
+{
+       return pci_register_driver(&pch_gpio_driver);
+}
+module_init(pch_gpio_pci_init);
+
+static void __exit pch_gpio_pci_exit(void)
+{
+       pci_unregister_driver(&pch_gpio_driver);
+}
+module_exit(pch_gpio_pci_exit);
+
+MODULE_DESCRIPTION("PCH GPIO PCI Driver");
+MODULE_LICENSE("GPL");
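One small gap worth noting in the file as added here: the PCI ID table is never exported with MODULE_DEVICE_TABLE(), so userspace cannot autoload the module from the device ID. If autoloading is wanted, the usual one-liner next to the table would be:

MODULE_DEVICE_TABLE(pci, pch_gpio_pcidev_id);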
index ddd0531..4529366 100644 (file)
@@ -47,6 +47,7 @@ struct timbgpio {
        spinlock_t              lock; /* mutual exclusion */
        struct gpio_chip        gpio;
        int                     irq_base;
+       unsigned long           last_ier;
 };
 
 static int timbgpio_update_bit(struct gpio_chip *gpio, unsigned index,
@@ -112,16 +113,24 @@ static void timbgpio_irq_disable(unsigned irq)
 {
        struct timbgpio *tgpio = get_irq_chip_data(irq);
        int offset = irq - tgpio->irq_base;
+       unsigned long flags;
 
-       timbgpio_update_bit(&tgpio->gpio, offset, TGPIO_IER, 0);
+       spin_lock_irqsave(&tgpio->lock, flags);
+       tgpio->last_ier &= ~(1 << offset);
+       iowrite32(tgpio->last_ier, tgpio->membase + TGPIO_IER);
+       spin_unlock_irqrestore(&tgpio->lock, flags);
 }
 
 static void timbgpio_irq_enable(unsigned irq)
 {
        struct timbgpio *tgpio = get_irq_chip_data(irq);
        int offset = irq - tgpio->irq_base;
+       unsigned long flags;
 
-       timbgpio_update_bit(&tgpio->gpio, offset, TGPIO_IER, 1);
+       spin_lock_irqsave(&tgpio->lock, flags);
+       tgpio->last_ier |= 1 << offset;
+       iowrite32(tgpio->last_ier, tgpio->membase + TGPIO_IER);
+       spin_unlock_irqrestore(&tgpio->lock, flags);
 }
 
 static int timbgpio_irq_type(unsigned irq, unsigned trigger)
@@ -194,8 +203,16 @@ static void timbgpio_irq(unsigned int irq, struct irq_desc *desc)
        ipr = ioread32(tgpio->membase + TGPIO_IPR);
        iowrite32(ipr, tgpio->membase + TGPIO_ICR);
 
+       /*
+        * Some versions of the hardware trash the IER register if more than
+        * one interrupt is received simultaneously.
+        */
+       iowrite32(0, tgpio->membase + TGPIO_IER);
+
        for_each_set_bit(offset, &ipr, tgpio->gpio.ngpio)
                generic_handle_irq(timbgpio_to_irq(&tgpio->gpio, offset));
+
+       iowrite32(tgpio->last_ier, tgpio->membase + TGPIO_IER);
 }
 
 static struct irq_chip timbgpio_irqchip = {
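The timbgpio change replaces read-modify-write of the IER register with a software shadow (last_ier): the handler masks everything while dispatching and then rewrites the cached value, so a trashed IER never propagates back. The same shadow-mask pattern in generic form, with hypothetical demo_* names and a placeholder register offset:

#include <linux/bitops.h>
#include <linux/io.h>
#include <linux/spinlock.h>

struct demo_gpio {
	void __iomem	*regs;
	spinlock_t	lock;
	unsigned long	last_ier;	/* software copy of the interrupt-enable reg */
};

#define DEMO_IER	0x10		/* placeholder register offset */

static void demo_irq_mask(struct demo_gpio *dg, int bit)
{
	unsigned long flags;

	spin_lock_irqsave(&dg->lock, flags);
	dg->last_ier &= ~BIT(bit);
	/* always write the cached value: the hardware register is never read back */
	iowrite32(dg->last_ier, dg->regs + DEMO_IER);
	spin_unlock_irqrestore(&dg->lock, flags);
}

static void demo_irq_unmask(struct demo_gpio *dg, int bit)
{
	unsigned long flags;

	spin_lock_irqsave(&dg->lock, flags);
	dg->last_ier |= BIT(bit);
	iowrite32(dg->last_ier, dg->regs + DEMO_IER);
	spin_unlock_irqrestore(&dg->lock, flags);
}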
index 4cb4bb0..53fab51 100644 (file)
@@ -560,7 +560,8 @@ static const struct pci_device_id scx200_pci[] __initconst = {
        { PCI_DEVICE(PCI_VENDOR_ID_NS, PCI_DEVICE_ID_NS_CS5535_ISA),
          .driver_data = 1 },
        { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_CS5536_ISA),
-         .driver_data = 2 }
+         .driver_data = 2 },
+       { 0, }
 };
 
 static struct {
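The scx200 fix adds the { 0, } terminator that every pci_device_id table needs, since scanners walk the table until they hit an all-zero entry. demo_find_entry() below is illustrative only, not a kernel API; it just mirrors the shape of that walk:

#include <linux/pci.h>

static const struct pci_device_id *
demo_find_entry(const struct pci_device_id *ids, u16 vendor, u16 device)
{
	/* Walk until the all-zero sentinel; without { 0, } this runs off the array. */
	for (; ids->vendor || ids->subvendor || ids->class_mask; ids++)
		if (ids->vendor == vendor && ids->device == device)
			return ids;
	return NULL;	/* only reached because of the terminating { 0, } */
}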
index af25e1f..e90db88 100644 (file)
@@ -563,7 +563,7 @@ reset_inf(struct inf_hw *hw)
                mdelay(10);
                hw->ipac.isac.adf2 = 0x87;
                hw->ipac.hscx[0].slot = 0x1f;
-               hw->ipac.hscx[0].slot = 0x23;
+               hw->ipac.hscx[1].slot = 0x23;
                break;
        case INF_GAZEL_R753:
                val = inl((u32)hw->cfg.start + GAZEL_CNTRL);
index b0554f8..ee4dae1 100644 (file)
@@ -164,11 +164,9 @@ l3_1tr6_setup(struct l3_process *pc, u_char pr, void *arg)
        char tmp[80];
        struct sk_buff *skb = arg;
 
-       p = skb->data;
-
        /* Channel Identification */
-       p = skb->data;
-       if ((p = findie(p, skb->len, WE0_chanID, 0))) {
+       p = findie(skb->data, skb->len, WE0_chanID, 0);
+       if (p) {
                if (p[1] != 1) {
                        l3_1tr6_error(pc, "setup wrong chanID len", skb);
                        return;
index 0acf639..2025818 100644 (file)
@@ -27,7 +27,6 @@
 #include <linux/fs.h>
 #include <linux/poll.h>
 #include <linux/completion.h>
-#include <linux/errno.h>
 #include <linux/mutex.h>
 #include <linux/wait.h>
 #include <linux/unistd.h>
index ef4115b..9ab5809 100644 (file)
@@ -631,8 +631,6 @@ struct atl1c_adapter {
 extern char atl1c_driver_name[];
 extern char atl1c_driver_version[];
 
-extern int atl1c_up(struct atl1c_adapter *adapter);
-extern void atl1c_down(struct atl1c_adapter *adapter);
 extern void atl1c_reinit_locked(struct atl1c_adapter *adapter);
 extern s32 atl1c_reset_hw(struct atl1c_hw *hw);
 extern void atl1c_set_ethtool_ops(struct net_device *netdev);
index 99ffcf6..09b099b 100644 (file)
@@ -66,6 +66,8 @@ static void atl1c_set_aspm(struct atl1c_hw *hw, bool linkup);
 static void atl1c_setup_mac_ctrl(struct atl1c_adapter *adapter);
 static void atl1c_clean_rx_irq(struct atl1c_adapter *adapter, u8 que,
                   int *work_done, int work_to_do);
+static int atl1c_up(struct atl1c_adapter *adapter);
+static void atl1c_down(struct atl1c_adapter *adapter);
 
 static const u16 atl1c_pay_load_size[] = {
        128, 256, 512, 1024, 2048, 4096,
@@ -2309,7 +2311,7 @@ static int atl1c_request_irq(struct atl1c_adapter *adapter)
        return err;
 }
 
-int atl1c_up(struct atl1c_adapter *adapter)
+static int atl1c_up(struct atl1c_adapter *adapter)
 {
        struct net_device *netdev = adapter->netdev;
        int num;
@@ -2351,7 +2353,7 @@ err_alloc_rx:
        return err;
 }
 
-void atl1c_down(struct atl1c_adapter *adapter)
+static void atl1c_down(struct atl1c_adapter *adapter)
 {
        struct net_device *netdev = adapter->netdev;
 
index dbd27b8..43579b3 100644 (file)
@@ -91,6 +91,8 @@ MODULE_VERSION(ATLX_DRIVER_VERSION);
 /* Temporary hack for merging atl1 and atl2 */
 #include "atlx.c"
 
+static const struct ethtool_ops atl1_ethtool_ops;
+
 /*
  * This is the only thing that needs to be changed to adjust the
  * maximum number of ports that the driver can manage.
@@ -353,7 +355,7 @@ static bool atl1_read_eeprom(struct atl1_hw *hw, u32 offset, u32 *p_value)
  * hw - Struct containing variables accessed by shared code
  * reg_addr - address of the PHY register to read
  */
-s32 atl1_read_phy_reg(struct atl1_hw *hw, u16 reg_addr, u16 *phy_data)
+static s32 atl1_read_phy_reg(struct atl1_hw *hw, u16 reg_addr, u16 *phy_data)
 {
        u32 val;
        int i;
@@ -553,7 +555,7 @@ static s32 atl1_read_mac_addr(struct atl1_hw *hw)
  *          1. calcu 32bit CRC for multicast address
  *          2. reverse crc with MSB to LSB
  */
-u32 atl1_hash_mc_addr(struct atl1_hw *hw, u8 *mc_addr)
+static u32 atl1_hash_mc_addr(struct atl1_hw *hw, u8 *mc_addr)
 {
        u32 crc32, value = 0;
        int i;
@@ -570,7 +572,7 @@ u32 atl1_hash_mc_addr(struct atl1_hw *hw, u8 *mc_addr)
  * hw - Struct containing variables accessed by shared code
  * hash_value - Multicast address hash value
  */
-void atl1_hash_set(struct atl1_hw *hw, u32 hash_value)
+static void atl1_hash_set(struct atl1_hw *hw, u32 hash_value)
 {
        u32 hash_bit, hash_reg;
        u32 mta;
@@ -914,7 +916,7 @@ static s32 atl1_get_speed_and_duplex(struct atl1_hw *hw, u16 *speed, u16 *duplex
        return 0;
 }
 
-void atl1_set_mac_addr(struct atl1_hw *hw)
+static void atl1_set_mac_addr(struct atl1_hw *hw)
 {
        u32 value;
        /*
@@ -3658,7 +3660,7 @@ static int atl1_nway_reset(struct net_device *netdev)
        return 0;
 }
 
-const struct ethtool_ops atl1_ethtool_ops = {
+static const struct ethtool_ops atl1_ethtool_ops = {
        .get_settings           = atl1_get_settings,
        .set_settings           = atl1_set_settings,
        .get_drvinfo            = atl1_get_drvinfo,
index 9c0ddb2..68de8cb 100644 (file)
@@ -56,16 +56,13 @@ struct atl1_adapter;
 struct atl1_hw;
 
 /* function prototypes needed by multiple files */
-u32 atl1_hash_mc_addr(struct atl1_hw *hw, u8 *mc_addr);
-void atl1_hash_set(struct atl1_hw *hw, u32 hash_value);
-s32 atl1_read_phy_reg(struct atl1_hw *hw, u16 reg_addr, u16 *phy_data);
-void atl1_set_mac_addr(struct atl1_hw *hw);
+static u32 atl1_hash_mc_addr(struct atl1_hw *hw, u8 *mc_addr);
+static void atl1_hash_set(struct atl1_hw *hw, u32 hash_value);
+static void atl1_set_mac_addr(struct atl1_hw *hw);
 static int atl1_mii_ioctl(struct net_device *netdev, struct ifreq *ifr,
        int cmd);
 static u32 atl1_check_link(struct atl1_adapter *adapter);
 
-extern const struct ethtool_ops atl1_ethtool_ops;
-
 /* hardware definitions specific to L1 */
 
 /* Block IDLE Status Register */
index f979ea2..afb7f7d 100644 (file)
 
 #include "atlx.h"
 
+static s32 atlx_read_phy_reg(struct atl1_hw *hw, u16 reg_addr, u16 *phy_data);
+static u32 atlx_hash_mc_addr(struct atl1_hw *hw, u8 *mc_addr);
+static void atlx_set_mac_addr(struct atl1_hw *hw);
+
 static struct atlx_spi_flash_dev flash_table[] = {
 /*     MFR_NAME  WRSR  READ  PRGM  WREN  WRDI  RDSR  RDID  SEC_ERS CHIP_ERS */
        {"Atmel", 0x00, 0x03, 0x02, 0x06, 0x04, 0x05, 0x15, 0x52,   0x62},
index 1e7f305..36eca1c 100644 (file)
@@ -1471,42 +1471,6 @@ err:
        return status;
 }
 
-/* Uses sync mcc */
-int be_cmd_read_port_type(struct be_adapter *adapter, u32 port,
-                               u8 *connector)
-{
-       struct be_mcc_wrb *wrb;
-       struct be_cmd_req_port_type *req;
-       int status;
-
-       spin_lock_bh(&adapter->mcc_lock);
-
-       wrb = wrb_from_mccq(adapter);
-       if (!wrb) {
-               status = -EBUSY;
-               goto err;
-       }
-       req = embedded_payload(wrb);
-
-       be_wrb_hdr_prepare(wrb, sizeof(struct be_cmd_resp_port_type), true, 0,
-                       OPCODE_COMMON_READ_TRANSRECV_DATA);
-
-       be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
-               OPCODE_COMMON_READ_TRANSRECV_DATA, sizeof(*req));
-
-       req->port = cpu_to_le32(port);
-       req->page_num = cpu_to_le32(TR_PAGE_A0);
-       status = be_mcc_notify_wait(adapter);
-       if (!status) {
-               struct be_cmd_resp_port_type *resp = embedded_payload(wrb);
-                       *connector = resp->data.connector;
-       }
-
-err:
-       spin_unlock_bh(&adapter->mcc_lock);
-       return status;
-}
-
 int be_cmd_write_flashrom(struct be_adapter *adapter, struct be_dma_mem *cmd,
                        u32 flash_type, u32 flash_opcode, u32 buf_size)
 {
index c7f6cdf..8469ff0 100644 (file)
@@ -1022,8 +1022,6 @@ extern int be_cmd_set_beacon_state(struct be_adapter *adapter,
                        u8 port_num, u8 beacon, u8 status, u8 state);
 extern int be_cmd_get_beacon_state(struct be_adapter *adapter,
                        u8 port_num, u32 *state);
-extern int be_cmd_read_port_type(struct be_adapter *adapter, u32 port,
-                                       u8 *connector);
 extern int be_cmd_write_flashrom(struct be_adapter *adapter,
                        struct be_dma_mem *cmd, u32 flash_oper,
                        u32 flash_opcode, u32 buf_size);
index 45b1f66..c36cd2f 100644 (file)
@@ -849,20 +849,16 @@ static void be_rx_stats_update(struct be_rx_obj *rxo,
                stats->rx_mcast_pkts++;
 }
 
-static inline bool do_pkt_csum(struct be_eth_rx_compl *rxcp, bool cso)
+static inline bool csum_passed(struct be_eth_rx_compl *rxcp)
 {
-       u8 l4_cksm, ip_version, ipcksm, tcpf = 0, udpf = 0, ipv6_chk;
+       u8 l4_cksm, ipv6, ipcksm;
 
        l4_cksm = AMAP_GET_BITS(struct amap_eth_rx_compl, l4_cksm, rxcp);
        ipcksm = AMAP_GET_BITS(struct amap_eth_rx_compl, ipcksm, rxcp);
-       ip_version = AMAP_GET_BITS(struct amap_eth_rx_compl, ip_version, rxcp);
-       if (ip_version) {
-               tcpf = AMAP_GET_BITS(struct amap_eth_rx_compl, tcpf, rxcp);
-               udpf = AMAP_GET_BITS(struct amap_eth_rx_compl, udpf, rxcp);
-       }
-       ipv6_chk = (ip_version && (tcpf || udpf));
+       ipv6 = AMAP_GET_BITS(struct amap_eth_rx_compl, ip_version, rxcp);
 
-       return ((l4_cksm && ipv6_chk && ipcksm) && cso) ? false : true;
+       /* Ignore ipcksm for ipv6 pkts */
+       return l4_cksm && (ipcksm || ipv6);
 }
 
 static struct be_rx_page_info *
@@ -1017,10 +1013,10 @@ static void be_rx_compl_process(struct be_adapter *adapter,
 
        skb_fill_rx_data(adapter, rxo, skb, rxcp, num_rcvd);
 
-       if (do_pkt_csum(rxcp, adapter->rx_csum))
-               skb_checksum_none_assert(skb);
-       else
+       if (likely(adapter->rx_csum && csum_passed(rxcp)))
                skb->ip_summed = CHECKSUM_UNNECESSARY;
+       else
+               skb_checksum_none_assert(skb);
 
        skb->truesize = skb->len + sizeof(struct sk_buff);
        skb->protocol = eth_type_trans(skb, adapter->netdev);
@@ -1674,7 +1670,7 @@ static inline bool do_gro(struct be_adapter *adapter, struct be_rx_obj *rxo,
        return (tcp_frame && !err) ? true : false;
 }
 
-int be_poll_rx(struct napi_struct *napi, int budget)
+static int be_poll_rx(struct napi_struct *napi, int budget)
 {
        struct be_eq_obj *rx_eq = container_of(napi, struct be_eq_obj, napi);
        struct be_rx_obj *rxo = container_of(rx_eq, struct be_rx_obj, rx_eq);
@@ -1806,6 +1802,20 @@ static void be_worker(struct work_struct *work)
        struct be_rx_obj *rxo;
        int i;
 
+       /* when interrupts are not yet enabled, just reap any pending
+       * mcc completions */
+       if (!netif_running(adapter->netdev)) {
+               int mcc_compl, status = 0;
+
+               mcc_compl = be_process_mcc(adapter, &status);
+
+               if (mcc_compl) {
+                       struct be_mcc_obj *mcc_obj = &adapter->mcc_obj;
+                       be_cq_notify(adapter, mcc_obj->cq.id, false, mcc_compl);
+               }
+               goto reschedule;
+       }
+
        if (!adapter->stats_ioctl_sent)
                be_cmd_get_stats(adapter, &adapter->stats_cmd);
 
@@ -1824,6 +1834,7 @@ static void be_worker(struct work_struct *work)
        if (!adapter->ue_detected)
                be_detect_dump_ue(adapter);
 
+reschedule:
        schedule_delayed_work(&adapter->work, msecs_to_jiffies(1000));
 }
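Taken together, the be2net hunks here move the periodic worker from the open/close path to probe/remove, so be_worker() has to tolerate running while the interface is down (it then only drains MCC completions). A generic sketch of that lifecycle, with hypothetical demo_* names:

#include <linux/jiffies.h>
#include <linux/kernel.h>
#include <linux/netdevice.h>
#include <linux/workqueue.h>

struct demo_adapter {
	struct net_device	*netdev;
	struct delayed_work	work;
};

static void demo_worker(struct work_struct *work)
{
	struct demo_adapter *ad =
		container_of(work, struct demo_adapter, work.work);

	if (netif_running(ad->netdev)) {
		/* normal periodic work: statistics, queue refill, error checks */
	} else {
		/* interface down: only housekeeping that needs no interrupts */
	}

	/* self-rescheduling, exactly like be_worker() above */
	schedule_delayed_work(&ad->work, msecs_to_jiffies(1000));
}

/*
 * probe():  INIT_DELAYED_WORK(&ad->work, demo_worker);
 *           schedule_delayed_work(&ad->work, msecs_to_jiffies(100));
 * remove(): cancel_delayed_work_sync(&ad->work);  (never from inside the worker)
 */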
 
@@ -2019,8 +2030,6 @@ static int be_close(struct net_device *netdev)
        struct be_eq_obj *tx_eq = &adapter->tx_eq;
        int vec, i;
 
-       cancel_delayed_work_sync(&adapter->work);
-
        be_async_mcc_disable(adapter);
 
        netif_stop_queue(netdev);
@@ -2085,8 +2094,6 @@ static int be_open(struct net_device *netdev)
        /* Now that interrupts are on we can process async mcc */
        be_async_mcc_enable(adapter);
 
-       schedule_delayed_work(&adapter->work, msecs_to_jiffies(100));
-
        status = be_cmd_link_status_query(adapter, &link_up, &mac_speed,
                        &link_speed);
        if (status)
@@ -2299,9 +2306,6 @@ static int be_clear(struct be_adapter *adapter)
 
 
 #define FW_FILE_HDR_SIGN       "ServerEngines Corp. "
-char flash_cookie[2][16] =     {"*** SE FLAS",
-                               "H DIRECTORY *** "};
-
 static bool be_flash_redboot(struct be_adapter *adapter,
                        const u8 *p, u32 img_start, int image_size,
                        int hdr_size)
@@ -2559,7 +2563,6 @@ static void be_netdev_init(struct net_device *netdev)
        netif_napi_add(netdev, &adapter->tx_eq.napi, be_poll_tx_mcc,
                BE_NAPI_WEIGHT);
 
-       netif_carrier_off(netdev);
        netif_stop_queue(netdev);
 }
 
@@ -2715,6 +2718,8 @@ static void __devexit be_remove(struct pci_dev *pdev)
        if (!adapter)
                return;
 
+       cancel_delayed_work_sync(&adapter->work);
+
        unregister_netdev(adapter->netdev);
 
        be_clear(adapter);
@@ -2868,8 +2873,10 @@ static int __devinit be_probe(struct pci_dev *pdev,
        status = register_netdev(netdev);
        if (status != 0)
                goto unsetup;
+       netif_carrier_off(netdev);
 
        dev_info(&pdev->dev, "%s port %d\n", nic_name(pdev), adapter->port_num);
+       schedule_delayed_work(&adapter->work, msecs_to_jiffies(100));
        return 0;
 
 unsetup:
index 9571ecf..9eea225 100644 (file)
@@ -1288,15 +1288,11 @@ struct bnx2x_func_init_params {
 
 #define WAIT_RAMROD_POLL       0x01
 #define WAIT_RAMROD_COMMON     0x02
-int bnx2x_wait_ramrod(struct bnx2x *bp, int state, int idx,
-                            int *state_p, int flags);
 
 /* dmae */
 void bnx2x_read_dmae(struct bnx2x *bp, u32 src_addr, u32 len32);
 void bnx2x_write_dmae(struct bnx2x *bp, dma_addr_t dma_addr, u32 dst_addr,
                      u32 len32);
-void bnx2x_write_dmae_phys_len(struct bnx2x *bp, dma_addr_t phys_addr,
-                              u32 addr, u32 len);
 void bnx2x_post_dmae(struct bnx2x *bp, struct dmae_command *dmae, int idx);
 u32 bnx2x_dmae_opcode_add_comp(u32 opcode, u8 comp_type);
 u32 bnx2x_dmae_opcode_clr_src_reset(u32 opcode);
@@ -1307,7 +1303,6 @@ int bnx2x_get_gpio(struct bnx2x *bp, int gpio_num, u8 port);
 int bnx2x_set_gpio(struct bnx2x *bp, int gpio_num, u32 mode, u8 port);
 int bnx2x_set_gpio_int(struct bnx2x *bp, int gpio_num, u32 mode, u8 port);
 u32 bnx2x_fw_command(struct bnx2x *bp, u32 command, u32 param);
-void bnx2x_reg_wr_ind(struct bnx2x *bp, u32 addr, u32 val);
 
 void bnx2x_calc_fc_adv(struct bnx2x *bp);
 int bnx2x_sp_post(struct bnx2x *bp, int command, int cid,
index bc58375..459614d 100644 (file)
@@ -25,6 +25,7 @@
 
 #include "bnx2x_init.h"
 
+static int bnx2x_setup_irqs(struct bnx2x *bp);
 
 /* free skb in the packet ring at pos idx
  * return idx of last bd freed
@@ -2187,7 +2188,7 @@ int bnx2x_change_mac_addr(struct net_device *dev, void *p)
 }
 
 
-int bnx2x_setup_irqs(struct bnx2x *bp)
+static int bnx2x_setup_irqs(struct bnx2x *bp)
 {
        int rc = 0;
        if (bp->flags & USING_MSIX_FLAG) {
index 5bfe0ab..6b28739 100644 (file)
@@ -116,13 +116,6 @@ void bnx2x_setup_cnic_irq_info(struct bnx2x *bp);
  */
 void bnx2x_int_enable(struct bnx2x *bp);
 
-/**
- * Disable HW interrupts.
- *
- * @param bp
- */
-void bnx2x_int_disable(struct bnx2x *bp);
-
 /**
  * Disable interrupts. This function ensures that there are no
  * ISRs or SP DPCs (sp_task) are running after it returns.
@@ -191,17 +184,6 @@ void bnx2x_free_mem(struct bnx2x *bp);
 int bnx2x_setup_client(struct bnx2x *bp, struct bnx2x_fastpath *fp,
                       int is_leading);
 
-/**
- * Bring down an eth client.
- *
- * @param bp
- * @param p
- *
- * @return int
- */
-int bnx2x_stop_fw_client(struct bnx2x *bp,
-                        struct bnx2x_client_ramrod_params *p);
-
 /**
  * Set number of queues according to mode
  *
@@ -250,34 +232,6 @@ int bnx2x_release_hw_lock(struct bnx2x *bp, u32 resource);
  */
 void bnx2x_set_eth_mac(struct bnx2x *bp, int set);
 
-#ifdef BCM_CNIC
-/**
- * Set iSCSI MAC(s) at the next enties in the CAM after the ETH
- * MAC(s). The function will wait until the ramrod completion
- * returns.
- *
- * @param bp driver handle
- * @param set set or clear the CAM entry
- *
- * @return 0 if cussess, -ENODEV if ramrod doesn't return.
- */
-int bnx2x_set_iscsi_eth_mac_addr(struct bnx2x *bp, int set);
-#endif
-
-/**
- * Initialize status block in FW and HW
- *
- * @param bp driver handle
- * @param dma_addr_t mapping
- * @param int sb_id
- * @param int vfid
- * @param u8 vf_valid
- * @param int fw_sb_id
- * @param int igu_sb_id
- */
-void bnx2x_init_sb(struct bnx2x *bp, dma_addr_t mapping, int vfid,
-                         u8 vf_valid, int fw_sb_id, int igu_sb_id);
-
 /**
  * Set MAC filtering configurations.
  *
@@ -326,7 +280,6 @@ void bnx2x_sp_event(struct bnx2x_fastpath *fp, union eth_rx_cqe *rr_cqe);
  * @return int
  */
 int bnx2x_func_start(struct bnx2x *bp);
-int bnx2x_func_stop(struct bnx2x *bp);
 
 /**
  * Prepare ILT configurations according to current driver
@@ -395,14 +348,6 @@ int bnx2x_enable_msix(struct bnx2x *bp);
  */
 int bnx2x_enable_msi(struct bnx2x *bp);
 
-/**
- * Request IRQ vectors from OS.
- *
- * @param bp
- *
- * @return int
- */
-int bnx2x_setup_irqs(struct bnx2x *bp);
 /**
  * NAPI callback
  *
index e65de78..a306b0e 100644 (file)
@@ -16,7 +16,9 @@
 #define BNX2X_INIT_OPS_H
 
 static int bnx2x_gunzip(struct bnx2x *bp, const u8 *zbuf, int len);
-
+static void bnx2x_reg_wr_ind(struct bnx2x *bp, u32 addr, u32 val);
+static void bnx2x_write_dmae_phys_len(struct bnx2x *bp, dma_addr_t phys_addr,
+                                     u32 addr, u32 len);
 
 static void bnx2x_init_str_wr(struct bnx2x *bp, u32 addr, const u32 *data,
                              u32 len)
@@ -589,7 +591,7 @@ static int bnx2x_ilt_client_mem_op(struct bnx2x *bp, int cli_num, u8 memop)
        return rc;
 }
 
-int bnx2x_ilt_mem_op(struct bnx2x *bp, u8 memop)
+static int bnx2x_ilt_mem_op(struct bnx2x *bp, u8 memop)
 {
        int rc = bnx2x_ilt_client_mem_op(bp, ILT_CLIENT_CDU, memop);
        if (!rc)
@@ -635,7 +637,7 @@ static void bnx2x_ilt_line_init_op(struct bnx2x *bp, struct bnx2x_ilt *ilt,
        }
 }
 
-void bnx2x_ilt_boundry_init_op(struct bnx2x *bp,
+static void bnx2x_ilt_boundry_init_op(struct bnx2x *bp,
                                      struct ilt_client_info *ilt_cli,
                                      u32 ilt_start, u8 initop)
 {
@@ -688,8 +690,10 @@ void bnx2x_ilt_boundry_init_op(struct bnx2x *bp,
        }
 }
 
-void bnx2x_ilt_client_init_op_ilt(struct bnx2x *bp, struct bnx2x_ilt *ilt,
-                                 struct ilt_client_info *ilt_cli, u8 initop)
+static void bnx2x_ilt_client_init_op_ilt(struct bnx2x *bp,
+                                        struct bnx2x_ilt *ilt,
+                                        struct ilt_client_info *ilt_cli,
+                                        u8 initop)
 {
        int i;
 
@@ -703,8 +707,8 @@ void bnx2x_ilt_client_init_op_ilt(struct bnx2x *bp, struct bnx2x_ilt *ilt,
        bnx2x_ilt_boundry_init_op(bp, ilt_cli, ilt->start_line, initop);
 }
 
-void bnx2x_ilt_client_init_op(struct bnx2x *bp,
-                             struct ilt_client_info *ilt_cli, u8 initop)
+static void bnx2x_ilt_client_init_op(struct bnx2x *bp,
+                                    struct ilt_client_info *ilt_cli, u8 initop)
 {
        struct bnx2x_ilt *ilt = BP_ILT(bp);
 
@@ -720,7 +724,7 @@ static void bnx2x_ilt_client_id_init_op(struct bnx2x *bp,
        bnx2x_ilt_client_init_op(bp, ilt_cli, initop);
 }
 
-void bnx2x_ilt_init_op(struct bnx2x *bp, u8 initop)
+static void bnx2x_ilt_init_op(struct bnx2x *bp, u8 initop)
 {
        bnx2x_ilt_client_id_init_op(bp, ILT_CLIENT_CDU, initop);
        bnx2x_ilt_client_id_init_op(bp, ILT_CLIENT_QM, initop);
@@ -752,7 +756,7 @@ static void bnx2x_ilt_init_client_psz(struct bnx2x *bp, int cli_num,
  * called during init common stage, ilt clients should be initialized
  * prior to calling this function
  */
-void bnx2x_ilt_init_page_size(struct bnx2x *bp, u8 initop)
+static void bnx2x_ilt_init_page_size(struct bnx2x *bp, u8 initop)
 {
        bnx2x_ilt_init_client_psz(bp, ILT_CLIENT_CDU,
                                  PXP2_REG_RQ_CDU_P_SIZE, initop);
@@ -772,8 +776,8 @@ void bnx2x_ilt_init_page_size(struct bnx2x *bp, u8 initop)
 #define QM_INIT(cid_cnt)       (cid_cnt > QM_INIT_MIN_CID_COUNT)
 
 /* called during init port stage */
-void bnx2x_qm_init_cid_count(struct bnx2x *bp, int qm_cid_count,
-                            u8 initop)
+static void bnx2x_qm_init_cid_count(struct bnx2x *bp, int qm_cid_count,
+                                   u8 initop)
 {
        int port = BP_PORT(bp);
 
@@ -814,8 +818,8 @@ static void bnx2x_qm_set_ptr_table(struct bnx2x *bp, int qm_cid_count)
 }
 
 /* called during init common stage */
-void bnx2x_qm_init_ptr_table(struct bnx2x *bp, int qm_cid_count,
-                            u8 initop)
+static void bnx2x_qm_init_ptr_table(struct bnx2x *bp, int qm_cid_count,
+                                   u8 initop)
 {
        if (!QM_INIT(qm_cid_count))
                return;
@@ -836,8 +840,8 @@ void bnx2x_qm_init_ptr_table(struct bnx2x *bp, int qm_cid_count,
 ****************************************************************************/
 
 /* called during init func stage */
-void bnx2x_src_init_t2(struct bnx2x *bp, struct src_ent *t2,
-                      dma_addr_t t2_mapping, int src_cid_count)
+static void bnx2x_src_init_t2(struct bnx2x *bp, struct src_ent *t2,
+                             dma_addr_t t2_mapping, int src_cid_count)
 {
        int i;
        int port = BP_PORT(bp);
index 3e99bf9..2326774 100644 (file)
                (_bank + (_addr & 0xf)), \
                _val)
 
+static u8 bnx2x_cl45_read(struct bnx2x *bp, struct bnx2x_phy *phy,
+                         u8 devad, u16 reg, u16 *ret_val);
+
+static u8 bnx2x_cl45_write(struct bnx2x *bp, struct bnx2x_phy *phy,
+                          u8 devad, u16 reg, u16 val);
+
 static u32 bnx2x_bits_en(struct bnx2x *bp, u32 reg, u32 bits)
 {
        u32 val = REG_RD(bp, reg);
@@ -594,7 +600,7 @@ static u8 bnx2x_bmac2_enable(struct link_params *params,
        return 0;
 }
 
-u8 bnx2x_bmac_enable(struct link_params *params,
+static u8 bnx2x_bmac_enable(struct link_params *params,
                            struct link_vars *vars,
                            u8 is_lb)
 {
@@ -2537,122 +2543,6 @@ static void bnx2x_set_xgxs_loopback(struct bnx2x_phy *phy,
        }
 }
 
-/*
- *------------------------------------------------------------------------
- * bnx2x_override_led_value -
- *
- * Override the led value of the requested led
- *
- *------------------------------------------------------------------------
- */
-u8 bnx2x_override_led_value(struct bnx2x *bp, u8 port,
-                         u32 led_idx, u32 value)
-{
-       u32 reg_val;
-
-       /* If port 0 then use EMAC0, else use EMAC1*/
-       u32 emac_base = (port) ? GRCBASE_EMAC1 : GRCBASE_EMAC0;
-
-       DP(NETIF_MSG_LINK,
-                "bnx2x_override_led_value() port %x led_idx %d value %d\n",
-                port, led_idx, value);
-
-       switch (led_idx) {
-       case 0: /* 10MB led */
-               /* Read the current value of the LED register in
-               the EMAC block */
-               reg_val = REG_RD(bp, emac_base + EMAC_REG_EMAC_LED);
-               /* Set the OVERRIDE bit to 1 */
-               reg_val |= EMAC_LED_OVERRIDE;
-               /* If value is 1, set the 10M_OVERRIDE bit,
-               otherwise reset it.*/
-               reg_val = (value == 1) ? (reg_val | EMAC_LED_10MB_OVERRIDE) :
-                       (reg_val & ~EMAC_LED_10MB_OVERRIDE);
-               REG_WR(bp, emac_base + EMAC_REG_EMAC_LED, reg_val);
-               break;
-       case 1: /*100MB led    */
-               /*Read the current value of the LED register in
-               the EMAC block */
-               reg_val = REG_RD(bp, emac_base + EMAC_REG_EMAC_LED);
-               /*  Set the OVERRIDE bit to 1 */
-               reg_val |= EMAC_LED_OVERRIDE;
-               /*  If value is 1, set the 100M_OVERRIDE bit,
-               otherwise reset it.*/
-               reg_val = (value == 1) ? (reg_val | EMAC_LED_100MB_OVERRIDE) :
-                       (reg_val & ~EMAC_LED_100MB_OVERRIDE);
-               REG_WR(bp, emac_base + EMAC_REG_EMAC_LED, reg_val);
-               break;
-       case 2: /* 1000MB led */
-               /* Read the current value of the LED register in the
-               EMAC block */
-               reg_val = REG_RD(bp, emac_base + EMAC_REG_EMAC_LED);
-               /* Set the OVERRIDE bit to 1 */
-               reg_val |= EMAC_LED_OVERRIDE;
-               /* If value is 1, set the 1000M_OVERRIDE bit, otherwise
-               reset it. */
-               reg_val = (value == 1) ? (reg_val | EMAC_LED_1000MB_OVERRIDE) :
-                       (reg_val & ~EMAC_LED_1000MB_OVERRIDE);
-               REG_WR(bp, emac_base + EMAC_REG_EMAC_LED, reg_val);
-               break;
-       case 3: /* 2500MB led */
-               /*  Read the current value of the LED register in the
-               EMAC block*/
-               reg_val = REG_RD(bp, emac_base + EMAC_REG_EMAC_LED);
-               /* Set the OVERRIDE bit to 1 */
-               reg_val |= EMAC_LED_OVERRIDE;
-               /*  If value is 1, set the 2500M_OVERRIDE bit, otherwise
-               reset it.*/
-               reg_val = (value == 1) ? (reg_val | EMAC_LED_2500MB_OVERRIDE) :
-                       (reg_val & ~EMAC_LED_2500MB_OVERRIDE);
-               REG_WR(bp, emac_base + EMAC_REG_EMAC_LED, reg_val);
-               break;
-       case 4: /*10G led */
-               if (port == 0) {
-                       REG_WR(bp, NIG_REG_LED_10G_P0,
-                                   value);
-               } else {
-                       REG_WR(bp, NIG_REG_LED_10G_P1,
-                                   value);
-               }
-               break;
-       case 5: /* TRAFFIC led */
-               /* Find if the traffic control is via BMAC or EMAC */
-               if (port == 0)
-                       reg_val = REG_RD(bp, NIG_REG_NIG_EMAC0_EN);
-               else
-                       reg_val = REG_RD(bp, NIG_REG_NIG_EMAC1_EN);
-
-               /*  Override the traffic led in the EMAC:*/
-               if (reg_val == 1) {
-                       /* Read the current value of the LED register in
-                       the EMAC block */
-                       reg_val = REG_RD(bp, emac_base +
-                                            EMAC_REG_EMAC_LED);
-                       /* Set the TRAFFIC_OVERRIDE bit to 1 */
-                       reg_val |= EMAC_LED_OVERRIDE;
-                       /* If value is 1, set the TRAFFIC bit, otherwise
-                       reset it.*/
-                       reg_val = (value == 1) ? (reg_val | EMAC_LED_TRAFFIC) :
-                               (reg_val & ~EMAC_LED_TRAFFIC);
-                       REG_WR(bp, emac_base + EMAC_REG_EMAC_LED, reg_val);
-               } else { /* Override the traffic led in the BMAC: */
-                       REG_WR(bp, NIG_REG_LED_CONTROL_OVERRIDE_TRAFFIC_P0
-                                  + port*4, 1);
-                       REG_WR(bp, NIG_REG_LED_CONTROL_TRAFFIC_P0 + port*4,
-                                   value);
-               }
-               break;
-       default:
-               DP(NETIF_MSG_LINK,
-                        "bnx2x_override_led_value() unknown led index %d "
-                        "(should be 0-5)\n", led_idx);
-               return -EINVAL;
-       }
-
-       return 0;
-}
-
-
 u8 bnx2x_set_led(struct link_params *params,
                 struct link_vars *vars, u8 mode, u32 speed)
 {
@@ -4099,9 +3989,9 @@ static u8 bnx2x_8727_read_sfp_module_eeprom(struct bnx2x_phy *phy,
        return -EINVAL;
 }
 
-u8 bnx2x_read_sfp_module_eeprom(struct bnx2x_phy *phy,
-                               struct link_params *params, u16 addr,
-                                    u8 byte_cnt, u8 *o_buf)
+static u8 bnx2x_read_sfp_module_eeprom(struct bnx2x_phy *phy,
+                                      struct link_params *params, u16 addr,
+                                      u8 byte_cnt, u8 *o_buf)
 {
        if (phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726)
                return bnx2x_8726_read_sfp_module_eeprom(phy, params, addr,
@@ -6819,13 +6709,6 @@ u8 bnx2x_phy_probe(struct link_params *params)
        return 0;
 }
 
-u32 bnx2x_supported_attr(struct link_params *params, u8 phy_idx)
-{
-       if (phy_idx < params->num_phys)
-               return params->phy[phy_idx].supported;
-       return 0;
-}
-
 static void set_phy_vars(struct link_params *params)
 {
        struct bnx2x *bp = params->bp;
index 58a4c71..171abf8 100644 (file)
@@ -279,12 +279,6 @@ u8 bnx2x_phy_read(struct link_params *params, u8 phy_addr,
 
 u8 bnx2x_phy_write(struct link_params *params, u8 phy_addr,
                   u8 devad, u16 reg, u16 val);
-
-u8 bnx2x_cl45_read(struct bnx2x *bp, struct bnx2x_phy *phy,
-                  u8 devad, u16 reg, u16 *ret_val);
-
-u8 bnx2x_cl45_write(struct bnx2x *bp, struct bnx2x_phy *phy,
-                   u8 devad, u16 reg, u16 val);
 /* Reads the link_status from the shmem,
    and update the link vars accordingly */
 void bnx2x_link_status_update(struct link_params *input,
@@ -304,8 +298,6 @@ u8 bnx2x_set_led(struct link_params *params, struct link_vars *vars,
 #define LED_MODE_OPER                  2
 #define LED_MODE_FRONT_PANEL_OFF       3
 
-u8 bnx2x_override_led_value(struct bnx2x *bp, u8 port, u32 led_idx, u32 value);
-
 /* bnx2x_handle_module_detect_int should be called upon module detection
    interrupt */
 void bnx2x_handle_module_detect_int(struct link_params *params);
@@ -325,19 +317,12 @@ void bnx2x_ext_phy_hw_reset(struct bnx2x *bp, u8 port);
 /* Reset the external of SFX7101 */
 void bnx2x_sfx7101_sp_sw_reset(struct bnx2x *bp, struct bnx2x_phy *phy);
 
-u8 bnx2x_read_sfp_module_eeprom(struct bnx2x_phy *phy,
-                               struct link_params *params, u16 addr,
-                             u8 byte_cnt, u8 *o_buf);
-
 void bnx2x_hw_reset_phy(struct link_params *params);
 
 /* Checks if HW lock is required for this phy/board type */
 u8 bnx2x_hw_lock_required(struct bnx2x *bp, u32 shmem_base,
                          u32 shmem2_base);
 
-/* Returns the aggregative supported attributes of the phys on board */
-u32 bnx2x_supported_attr(struct link_params *params, u8 phy_idx);
-
 /* Check swap bit and adjust PHY order */
 u32 bnx2x_phy_selection(struct link_params *params);
 
index ff99a2f..e9ad16f 100644 (file)
@@ -403,7 +403,7 @@ static inline void storm_memset_hc_disable(struct bnx2x *bp, u8 port,
 /* used only at init
  * locking is done by mcp
  */
-void bnx2x_reg_wr_ind(struct bnx2x *bp, u32 addr, u32 val)
+static void bnx2x_reg_wr_ind(struct bnx2x *bp, u32 addr, u32 val)
 {
        pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS, addr);
        pci_write_config_dword(bp->pdev, PCICFG_GRC_DATA, val);
@@ -429,7 +429,8 @@ static u32 bnx2x_reg_rd_ind(struct bnx2x *bp, u32 addr)
 #define DMAE_DP_DST_PCI                "pci dst_addr [%x:%08x]"
 #define DMAE_DP_DST_NONE       "dst_addr [none]"
 
-void bnx2x_dp_dmae(struct bnx2x *bp, struct dmae_command *dmae, int msglvl)
+static void bnx2x_dp_dmae(struct bnx2x *bp, struct dmae_command *dmae,
+                         int msglvl)
 {
        u32 src_type = dmae->opcode & DMAE_COMMAND_SRC;
 
@@ -551,8 +552,9 @@ u32 bnx2x_dmae_opcode(struct bnx2x *bp, u8 src_type, u8 dst_type,
        return opcode;
 }
 
-void bnx2x_prep_dmae_with_comp(struct bnx2x *bp, struct dmae_command *dmae,
-                              u8 src_type, u8 dst_type)
+static void bnx2x_prep_dmae_with_comp(struct bnx2x *bp,
+                                     struct dmae_command *dmae,
+                                     u8 src_type, u8 dst_type)
 {
        memset(dmae, 0, sizeof(struct dmae_command));
 
@@ -567,7 +569,8 @@ void bnx2x_prep_dmae_with_comp(struct bnx2x *bp, struct dmae_command *dmae,
 }
 
 /* issue a dmae command over the init-channel and wailt for completion */
-int bnx2x_issue_dmae_with_comp(struct bnx2x *bp, struct dmae_command *dmae)
+static int bnx2x_issue_dmae_with_comp(struct bnx2x *bp,
+                                     struct dmae_command *dmae)
 {
        u32 *wb_comp = bnx2x_sp(bp, wb_comp);
        int cnt = CHIP_REV_IS_SLOW(bp) ? (400000) : 40;
@@ -674,8 +677,8 @@ void bnx2x_read_dmae(struct bnx2x *bp, u32 src_addr, u32 len32)
        bnx2x_issue_dmae_with_comp(bp, &dmae);
 }
 
-void bnx2x_write_dmae_phys_len(struct bnx2x *bp, dma_addr_t phys_addr,
-                              u32 addr, u32 len)
+static void bnx2x_write_dmae_phys_len(struct bnx2x *bp, dma_addr_t phys_addr,
+                                     u32 addr, u32 len)
 {
        int dmae_wr_max = DMAE_LEN32_WR_MAX(bp);
        int offset = 0;
@@ -1267,7 +1270,7 @@ static void bnx2x_igu_int_disable(struct bnx2x *bp)
                BNX2X_ERR("BUG! proper val not read from IGU!\n");
 }
 
-void bnx2x_int_disable(struct bnx2x *bp)
+static void bnx2x_int_disable(struct bnx2x *bp)
 {
        if (bp->common.int_block == INT_BLOCK_HC)
                bnx2x_hc_int_disable(bp);
@@ -2236,7 +2239,7 @@ u32 bnx2x_fw_command(struct bnx2x *bp, u32 command, u32 param)
 }
 
 /* must be called under rtnl_lock */
-void bnx2x_rxq_set_mac_filters(struct bnx2x *bp, u16 cl_id, u32 filters)
+static void bnx2x_rxq_set_mac_filters(struct bnx2x *bp, u16 cl_id, u32 filters)
 {
        u32 mask = (1 << cl_id);
 
@@ -2303,7 +2306,7 @@ void bnx2x_rxq_set_mac_filters(struct bnx2x *bp, u16 cl_id, u32 filters)
                bp->mac_filters.unmatched_unicast & ~mask;
 }
 
-void bnx2x_func_init(struct bnx2x *bp, struct bnx2x_func_init_params *p)
+static void bnx2x_func_init(struct bnx2x *bp, struct bnx2x_func_init_params *p)
 {
        struct tstorm_eth_function_common_config tcfg = {0};
        u16 rss_flgs;
@@ -2460,7 +2463,7 @@ static void bnx2x_pf_tx_cl_prep(struct bnx2x *bp,
        txq_init->hc_rate = bp->tx_ticks ? (1000000 / bp->tx_ticks) : 0;
 }
 
-void bnx2x_pf_init(struct bnx2x *bp)
+static void bnx2x_pf_init(struct bnx2x *bp)
 {
        struct bnx2x_func_init_params func_init = {0};
        struct bnx2x_rss_params rss = {0};
@@ -3928,7 +3931,7 @@ void bnx2x_setup_ndsb_state_machine(struct hc_status_block_sm *hc_sm,
        hc_sm->time_to_expire = 0xFFFFFFFF;
 }
 
-void bnx2x_init_sb(struct bnx2x *bp, dma_addr_t mapping, int vfid,
+static void bnx2x_init_sb(struct bnx2x *bp, dma_addr_t mapping, int vfid,
                          u8 vf_valid, int fw_sb_id, int igu_sb_id)
 {
        int igu_seg_id;
@@ -6021,6 +6024,9 @@ alloc_mem_err:
 /*
  * Init service functions
  */
+static int bnx2x_wait_ramrod(struct bnx2x *bp, int state, int idx,
+                            int *state_p, int flags);
+
 int bnx2x_func_start(struct bnx2x *bp)
 {
        bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_FUNCTION_START, 0, 0, 0, 1);
@@ -6030,7 +6036,7 @@ int bnx2x_func_start(struct bnx2x *bp)
                                 WAIT_RAMROD_COMMON);
 }
 
-int bnx2x_func_stop(struct bnx2x *bp)
+static int bnx2x_func_stop(struct bnx2x *bp)
 {
        bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_FUNCTION_STOP, 0, 0, 0, 1);
 
@@ -6103,8 +6109,8 @@ static void bnx2x_set_mac_addr_gen(struct bnx2x *bp, int set, u8 *mac,
        bnx2x_wait_ramrod(bp, 0, 0, &bp->set_mac_pending, ramrod_flags);
 }
 
-int bnx2x_wait_ramrod(struct bnx2x *bp, int state, int idx,
-                     int *state_p, int flags)
+static int bnx2x_wait_ramrod(struct bnx2x *bp, int state, int idx,
+                            int *state_p, int flags)
 {
        /* can take a while if any port is running */
        int cnt = 5000;
@@ -6154,7 +6160,7 @@ int bnx2x_wait_ramrod(struct bnx2x *bp, int state, int idx,
        return -EBUSY;
 }
 
-u8 bnx2x_e1h_cam_offset(struct bnx2x *bp, u8 rel_offset)
+static u8 bnx2x_e1h_cam_offset(struct bnx2x *bp, u8 rel_offset)
 {
        if (CHIP_IS_E1H(bp))
                return E1H_FUNC_MAX * rel_offset + BP_FUNC(bp);
@@ -6273,7 +6279,7 @@ static void bnx2x_invlidate_e1_mc_list(struct bnx2x *bp)
  *
  * @return 0 if success, -ENODEV if ramrod doesn't return.
  */
-int bnx2x_set_iscsi_eth_mac_addr(struct bnx2x *bp, int set)
+static int bnx2x_set_iscsi_eth_mac_addr(struct bnx2x *bp, int set)
 {
        u8 cam_offset = (CHIP_IS_E1(bp) ? ((BP_PORT(bp) ? 32 : 0) + 2) :
                         bnx2x_e1h_cam_offset(bp, CAM_ISCSI_ETH_LINE));
@@ -6383,11 +6389,11 @@ static inline void bnx2x_set_ctx_validation(struct eth_context *cxt, u32 cid)
                                       ETH_CONNECTION_TYPE);
 }
 
-int bnx2x_setup_fw_client(struct bnx2x *bp,
-                         struct bnx2x_client_init_params *params,
-                         u8 activate,
-                         struct client_init_ramrod_data *data,
-                         dma_addr_t data_mapping)
+static int bnx2x_setup_fw_client(struct bnx2x *bp,
+                                struct bnx2x_client_init_params *params,
+                                u8 activate,
+                                struct client_init_ramrod_data *data,
+                                dma_addr_t data_mapping)
 {
        u16 hc_usec;
        int ramrod = RAMROD_CMD_ID_ETH_CLIENT_SETUP;
@@ -6633,7 +6639,8 @@ int bnx2x_setup_client(struct bnx2x *bp, struct bnx2x_fastpath *fp,
        return rc;
 }
 
-int bnx2x_stop_fw_client(struct bnx2x *bp, struct bnx2x_client_ramrod_params *p)
+static int bnx2x_stop_fw_client(struct bnx2x *bp,
+                               struct bnx2x_client_ramrod_params *p)
 {
        int rc;
 
@@ -7440,7 +7447,7 @@ reset_task_exit:
  * Init service functions
  */
 
-u32 bnx2x_get_pretend_reg(struct bnx2x *bp)
+static u32 bnx2x_get_pretend_reg(struct bnx2x *bp)
 {
        u32 base = PXP2_REG_PGL_PRETEND_FUNC_F0;
        u32 stride = PXP2_REG_PGL_PRETEND_FUNC_F1 - base;
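Most of the atl1/atlx, be2net and bnx2x hunks above are one mechanical transformation: a function with no users left outside its file loses its header prototype, becomes static, and gets a file-local forward declaration wherever it is called before its definition. In miniature, with hypothetical foo_* names:

struct foo;					/* opaque here; illustrative only */

static int foo_do_thing(struct foo *fp);	/* file-local forward declaration,
						 * replacing the header prototype */

static void foo_caller(struct foo *fp)
{
	foo_do_thing(fp);			/* earlier callers still compile */
}

static int foo_do_thing(struct foo *fp)		/* the definition itself is static now */
{
	(void)fp;
	return 0;
}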
index beb3b7c..bdb68a6 100644 (file)
@@ -493,9 +493,9 @@ static void bond_vlan_rx_register(struct net_device *bond_dev,
        struct slave *slave;
        int i;
 
-       write_lock(&bond->lock);
+       write_lock_bh(&bond->lock);
        bond->vlgrp = grp;
-       write_unlock(&bond->lock);
+       write_unlock_bh(&bond->lock);
 
        bond_for_each_slave(bond, slave, i) {
                struct net_device *slave_dev = slave->dev;
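The bonding fix switches to the _bh variant presumably because bond->lock is also taken from softirq context; a writer that leaves bottom halves enabled can be interrupted on the same CPU by such a reader, which then spins forever on the rwlock. A minimal sketch of the rule, with hypothetical demo_* symbols:

#include <linux/spinlock.h>

static DEFINE_RWLOCK(demo_lock);
static int demo_cfg;

/* Writer runs in process context (e.g. an ioctl or ndo handler). */
static void demo_set_cfg(int v)
{
	write_lock_bh(&demo_lock);	/* _bh: readers may run in softirq context */
	demo_cfg = v;
	write_unlock_bh(&demo_lock);
}

/* Reader runs in softirq context (e.g. the packet path). */
static int demo_get_cfg(void)
{
	int v;

	read_lock(&demo_lock);
	v = demo_cfg;
	read_unlock(&demo_lock);
	return v;
}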
index 75bfc3a..09ed3f4 100644 (file)
@@ -31,3 +31,10 @@ config CAIF_SPI_SYNC
        Putting the next command and length in the start of the frame can
        help to synchronize to the next transfer in case of over or under-runs.
        This option also needs to be enabled on the modem.
+
+config CAIF_SHM
+       tristate "CAIF shared memory protocol driver"
+       depends on CAIF && U5500_MBOX
+       default n
+       ---help---
+       The CAIF shared memory protocol driver for the STE UX5500 platform.
index 3a11d61..b38d987 100644 (file)
@@ -8,3 +8,7 @@ obj-$(CONFIG_CAIF_TTY) += caif_serial.o
 # SPI slave physical interfaces module
 cfspi_slave-objs := caif_spi.o caif_spi_slave.o
 obj-$(CONFIG_CAIF_SPI_SLAVE) += cfspi_slave.o
+
+# Shared memory
+caif_shm-objs := caif_shmcore.o caif_shm_u5500.o
+obj-$(CONFIG_CAIF_SHM) += caif_shm.o
diff --git a/drivers/net/caif/caif_shm_u5500.c b/drivers/net/caif/caif_shm_u5500.c
new file mode 100644 (file)
index 0000000..1cd90da
--- /dev/null
@@ -0,0 +1,129 @@
+/*
+ * Copyright (C) ST-Ericsson AB 2010
+ * Contact: Sjur Brendeland / sjur.brandeland@stericsson.com
+ * Author:  Amarnath Revanna / amarnath.bangalore.revanna@stericsson.com
+ * License terms: GNU General Public License (GPL) version 2
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ":%s(): " fmt, __func__
+
+#include <linux/version.h>
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/netdevice.h>
+#include <mach/mbox.h>
+#include <net/caif/caif_shm.h>
+
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("CAIF Shared Memory protocol driver");
+
+#define MAX_SHM_INSTANCES      1
+
+enum {
+       MBX_ACC0,
+       MBX_ACC1,
+       MBX_DSP
+};
+
+static struct shmdev_layer shmdev_lyr[MAX_SHM_INSTANCES];
+
+static unsigned int shm_start;
+static unsigned int shm_size;
+
+module_param(shm_size, uint, 0440);
+MODULE_PARM_DESC(shm_size, "Total size of SHM shared memory");
+
+module_param(shm_start, uint, 0440);
+MODULE_PARM_DESC(shm_start, "Start address of SHM shared memory");
+
+static int shmdev_send_msg(u32 dev_id, u32 mbx_msg)
+{
+       /* Always block until msg is written successfully */
+       mbox_send(shmdev_lyr[dev_id].hmbx, mbx_msg, true);
+       return 0;
+}
+
+static int shmdev_mbx_setup(void *pshmdrv_cb, struct shmdev_layer *pshm_dev,
+                                                        void *pshm_drv)
+{
+       /*
+        * For UX5500, we have only 1 SHM instance which uses MBX0
+        * for communication with the peer modem
+        */
+       pshm_dev->hmbx = mbox_setup(MBX_ACC0, pshmdrv_cb, pshm_drv);
+
+       if (!pshm_dev->hmbx)
+               return -ENODEV;
+       else
+               return 0;
+}
+
+static int __init caif_shmdev_init(void)
+{
+       int i, result;
+
+       /* Loop is currently overkill, there is only one instance */
+       for (i = 0; i < MAX_SHM_INSTANCES; i++) {
+
+               shmdev_lyr[i].shm_base_addr = shm_start;
+               shmdev_lyr[i].shm_total_sz = shm_size;
+
+               if (((char *)shmdev_lyr[i].shm_base_addr == NULL)
+                              || (shmdev_lyr[i].shm_total_sz <= 0))    {
+                       pr_warn("ERROR,"
+                               "Shared memory Address and/or Size incorrect"
+                               ", Bailing out ...\n");
+                       result = -EINVAL;
+                       goto clean;
+               }
+
+               pr_info("SHM AREA (instance %d) STARTS"
+                       " AT %p\n", i, (char *)shmdev_lyr[i].shm_base_addr);
+
+               shmdev_lyr[i].shm_id = i;
+               shmdev_lyr[i].pshmdev_mbxsend = shmdev_send_msg;
+               shmdev_lyr[i].pshmdev_mbxsetup = shmdev_mbx_setup;
+
+               /*
+                * Finally, CAIF core module is called with details in place:
+                * 1. SHM base address
+                * 2. SHM size
+                * 3. MBX handle
+                */
+               result = caif_shmcore_probe(&shmdev_lyr[i]);
+               if (result) {
+                       pr_warn("ERROR[%d],"
+                               "Could not probe SHM core (instance %d)"
+                               " Bailing out ...\n", result, i);
+                       goto clean;
+               }
+       }
+
+       return 0;
+
+clean:
+       /*
+        * For now, we assume that even if one instance of SHM fails, we bail
+        * out of the driver support completely. For this, we need to release
+        * any memory allocated and unregister any instance of SHM net device.
+        */
+       for (i = 0; i < MAX_SHM_INSTANCES; i++) {
+               if (shmdev_lyr[i].pshm_netdev)
+                       unregister_netdev(shmdev_lyr[i].pshm_netdev);
+       }
+       return result;
+}
+
+static void __exit caif_shmdev_exit(void)
+{
+       int i;
+
+       for (i = 0; i < MAX_SHM_INSTANCES; i++) {
+               caif_shmcore_remove(shmdev_lyr[i].pshm_netdev);
+               kfree((void *)shmdev_lyr[i].shm_base_addr);
+       }
+
+}
+
+module_init(caif_shmdev_init);
+module_exit(caif_shmdev_exit);
diff --git a/drivers/net/caif/caif_shmcore.c b/drivers/net/caif/caif_shmcore.c
new file mode 100644 (file)
index 0000000..19f9c06
--- /dev/null
@@ -0,0 +1,744 @@
+/*
+ * Copyright (C) ST-Ericsson AB 2010
+ * Contact: Sjur Brendeland / sjur.brandeland@stericsson.com
+ * Authors:  Amarnath Revanna / amarnath.bangalore.revanna@stericsson.com,
+ *           Daniel Martensson / daniel.martensson@stericsson.com
+ * License terms: GNU General Public License (GPL) version 2
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ":%s(): " fmt, __func__
+
+#include <linux/spinlock.h>
+#include <linux/sched.h>
+#include <linux/list.h>
+#include <linux/netdevice.h>
+#include <linux/if_arp.h>
+
+#include <net/caif/caif_device.h>
+#include <net/caif/caif_shm.h>
+
+#define NR_TX_BUF              6
+#define NR_RX_BUF              6
+#define TX_BUF_SZ              0x2000
+#define RX_BUF_SZ              0x2000
+
+#define CAIF_NEEDED_HEADROOM   32
+
+#define CAIF_FLOW_ON           1
+#define CAIF_FLOW_OFF          0
+
+#define LOW_WATERMARK          3
+#define HIGH_WATERMARK         4
+
+/* Maximum number of CAIF buffers per shared memory buffer. */
+#define SHM_MAX_FRMS_PER_BUF   10
+
+/*
+ * Size in bytes of the descriptor area
+ * (With end of descriptor signalling)
+ */
+#define SHM_CAIF_DESC_SIZE     ((SHM_MAX_FRMS_PER_BUF + 1) * \
+                                       sizeof(struct shm_pck_desc))
+
+/*
+ * Offset to the first CAIF frame within a shared memory buffer.
+ * Aligned on 32 bytes.
+ */
+#define SHM_CAIF_FRM_OFS       (SHM_CAIF_DESC_SIZE + (SHM_CAIF_DESC_SIZE % 32))
+
+/* Number of bytes for CAIF shared memory header. */
+#define SHM_HDR_LEN            1
+
+/* Number of padding bytes for the complete CAIF frame. */
+#define SHM_FRM_PAD_LEN                4
+
+#define CAIF_MAX_MTU           4096
+
+#define SHM_SET_FULL(x)        (((x+1) & 0x0F) << 0)
+#define SHM_GET_FULL(x)        (((x >> 0) & 0x0F) - 1)
+
+#define SHM_SET_EMPTY(x)       (((x+1) & 0x0F) << 4)
+#define SHM_GET_EMPTY(x)       (((x >> 4) & 0x0F) - 1)
+
+#define SHM_FULL_MASK          (0x0F << 0)
+#define SHM_EMPTY_MASK         (0x0F << 4)
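The mailbox word packs two 4-bit fields: bits 0-3 signal a buffer that became full and bits 4-7 one that became empty, each stored as index+1 so an all-zero field means nothing is being signalled. A small round-trip sketch using the macros above (demo_encode() is illustrative only):

static u32 demo_encode(int full_idx, int empty_idx)
{
	u32 msg = 0;

	if (full_idx >= 0)
		msg |= SHM_SET_FULL(full_idx);		/* idx+1 into bits 0-3 */
	if (empty_idx >= 0)
		msg |= SHM_SET_EMPTY(empty_idx);	/* idx+1 into bits 4-7 */
	return msg;
}

/*
 * demo_encode(2, -1) == 0x03: SHM_FULL_MASK is set and SHM_GET_FULL() yields 2,
 * while the untouched empty field decodes to -1, i.e. "no buffer signalled".
 */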
+
+struct shm_pck_desc {
+       /*
+        * Offset from start of shared memory area to start of
+        * shared memory CAIF frame.
+        */
+       u32 frm_ofs;
+       u32 frm_len;
+};
+
+struct buf_list {
+       unsigned char *desc_vptr;
+       u32 phy_addr;
+       u32 index;
+       u32 len;
+       u32 frames;
+       u32 frm_ofs;
+       struct list_head list;
+};
+
+struct shm_caif_frm {
+       /* Number of bytes of padding before the CAIF frame. */
+       u8 hdr_ofs;
+};
+
+struct shmdrv_layer {
+       /* caif_dev_common must always be first in the structure*/
+       struct caif_dev_common cfdev;
+
+       u32 shm_tx_addr;
+       u32 shm_rx_addr;
+       u32 shm_base_addr;
+       u32 tx_empty_available;
+       spinlock_t lock;
+
+       struct list_head tx_empty_list;
+       struct list_head tx_pend_list;
+       struct list_head tx_full_list;
+       struct list_head rx_empty_list;
+       struct list_head rx_pend_list;
+       struct list_head rx_full_list;
+
+       struct workqueue_struct *pshm_tx_workqueue;
+       struct workqueue_struct *pshm_rx_workqueue;
+
+       struct work_struct shm_tx_work;
+       struct work_struct shm_rx_work;
+
+       struct sk_buff_head sk_qhead;
+       struct shmdev_layer *pshm_dev;
+};
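The three tx_* and three rx_* lists form a simple buffer pool: a buffer starts on the empty list, moves to the pending list once CAIF frames have been copied into it, to the full list when its index is signalled to the modem, and back to empty when the modem acknowledges it (as the mailbox callback below does, resetting frames and frm_ofs). The sketch compresses those moves, which the real driver performs in separate paths, into one function purely for illustration:

static void demo_tx_buffer_cycle(struct shmdrv_layer *drv, struct buf_list *buf)
{
	unsigned long flags;

	spin_lock_irqsave(&drv->lock, flags);
	/* 1. frames copied in, awaiting transmission: empty -> pending */
	list_move_tail(&buf->list, &drv->tx_pend_list);
	/* 2. index signalled to the modem over the mailbox: pending -> full */
	list_move_tail(&buf->list, &drv->tx_full_list);
	/* 3. modem acknowledges it consumed the buffer: full -> empty */
	buf->frames = 0;
	buf->frm_ofs = SHM_CAIF_FRM_OFS;
	list_move_tail(&buf->list, &drv->tx_empty_list);
	spin_unlock_irqrestore(&drv->lock, flags);
}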
+
+static int shm_netdev_open(struct net_device *shm_netdev)
+{
+       netif_wake_queue(shm_netdev);
+       return 0;
+}
+
+static int shm_netdev_close(struct net_device *shm_netdev)
+{
+       netif_stop_queue(shm_netdev);
+       return 0;
+}
+
+int caif_shmdrv_rx_cb(u32 mbx_msg, void *priv)
+{
+       struct buf_list *pbuf;
+       struct shmdrv_layer *pshm_drv;
+       struct list_head *pos;
+       u32 avail_emptybuff = 0;
+       unsigned long flags = 0;
+
+       pshm_drv = (struct shmdrv_layer *)priv;
+
+       /* Check for received buffers. */
+       if (mbx_msg & SHM_FULL_MASK) {
+               int idx;
+
+               spin_lock_irqsave(&pshm_drv->lock, flags);
+
+               /* Check whether we have any outstanding buffers. */
+               if (list_empty(&pshm_drv->rx_empty_list)) {
+
+                       /* Release spin lock. */
+                       spin_unlock_irqrestore(&pshm_drv->lock, flags);
+
+                       /* We print even in IRQ context... */
+                       pr_warn("No empty Rx buffers to fill: "
+                                       "mbx_msg:%x\n", mbx_msg);
+
+                       /* Bail out. */
+                       goto err_sync;
+               }
+
+               pbuf =
+                       list_entry(pshm_drv->rx_empty_list.next,
+                                       struct buf_list, list);
+               idx = pbuf->index;
+
+               /* Check buffer synchronization. */
+               if (idx != SHM_GET_FULL(mbx_msg)) {
+
+                       /* We print even in IRQ context... */
+                       pr_warn(
+                       "phyif_shm_mbx_msg_cb: RX full out of sync:"
+                       " idx:%d, msg:%x SHM_GET_FULL(mbx_msg):%x\n",
+                               idx, mbx_msg, SHM_GET_FULL(mbx_msg));
+
+                       spin_unlock_irqrestore(&pshm_drv->lock, flags);
+
+                       /* Bail out. */
+                       goto err_sync;
+               }
+
+               list_del_init(&pbuf->list);
+               list_add_tail(&pbuf->list, &pshm_drv->rx_full_list);
+
+               spin_unlock_irqrestore(&pshm_drv->lock, flags);
+
+               /* Schedule RX work queue. */
+               if (!work_pending(&pshm_drv->shm_rx_work))
+                       queue_work(pshm_drv->pshm_rx_workqueue,
+                                               &pshm_drv->shm_rx_work);
+       }
+
+       /* Check for emptied buffers. */
+       if (mbx_msg & SHM_EMPTY_MASK) {
+               int idx;
+
+               spin_lock_irqsave(&pshm_drv->lock, flags);
+
+               /* Check whether we have any outstanding buffers. */
+               if (list_empty(&pshm_drv->tx_full_list)) {
+
+                       /* We print even in IRQ context... */
+                       pr_warn("No TX to empty: msg:%x\n", mbx_msg);
+
+                       spin_unlock_irqrestore(&pshm_drv->lock, flags);
+
+                       /* Bail out. */
+                       goto err_sync;
+               }
+
+               pbuf =
+                       list_entry(pshm_drv->tx_full_list.next,
+                                       struct buf_list, list);
+               idx = pbuf->index;
+
+               /* Check buffer synchronization. */
+               if (idx != SHM_GET_EMPTY(mbx_msg)) {
+
+                       spin_unlock_irqrestore(&pshm_drv->lock, flags);
+
+                       /* We print even in IRQ context... */
+                       pr_warn("TX empty "
+                               "out of sync:idx:%d, msg:%x\n", idx, mbx_msg);
+
+                       /* Bail out. */
+                       goto err_sync;
+               }
+               list_del_init(&pbuf->list);
+
+               /* Reset buffer parameters. */
+               pbuf->frames = 0;
+               pbuf->frm_ofs = SHM_CAIF_FRM_OFS;
+
+               list_add_tail(&pbuf->list, &pshm_drv->tx_empty_list);
+
+               /* Check the available no. of buffers in the empty list */
+               list_for_each(pos, &pshm_drv->tx_empty_list)
+                       avail_emptybuff++;
+
+               /* Check whether we have to wake up the transmitter. */
+               if ((avail_emptybuff > HIGH_WATERMARK) &&
+                                       (!pshm_drv->tx_empty_available)) {
+                       pshm_drv->tx_empty_available = 1;
+                       pshm_drv->cfdev.flowctrl
+                                       (pshm_drv->pshm_dev->pshm_netdev,
+                                                               CAIF_FLOW_ON);
+
+                       spin_unlock_irqrestore(&pshm_drv->lock, flags);
+
+                       /* Schedule the work queue if required. */
+                       if (!work_pending(&pshm_drv->shm_tx_work))
+                               queue_work(pshm_drv->pshm_tx_workqueue,
+                                                       &pshm_drv->shm_tx_work);
+               } else
+                       spin_unlock_irqrestore(&pshm_drv->lock, flags);
+       }
+
+       return 0;
+
+err_sync:
+       return -EIO;
+}
+
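+/*
+ * Rx work function: walk the descriptor area of each full Rx buffer,
+ * copy the CAIF frames it contains into skbs and push them up the
+ * stack, then park the buffer on the pending list so that the Tx work
+ * function can hand it back to the modem.
+ */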
+static void shm_rx_work_func(struct work_struct *rx_work)
+{
+       struct shmdrv_layer *pshm_drv;
+       struct buf_list *pbuf;
+       unsigned long flags = 0;
+       struct sk_buff *skb;
+       char *p;
+       int ret;
+
+       pshm_drv = container_of(rx_work, struct shmdrv_layer, shm_rx_work);
+
+       while (1) {
+
+               struct shm_pck_desc *pck_desc;
+
+               spin_lock_irqsave(&pshm_drv->lock, flags);
+
+               /* Check for received buffers. */
+               if (list_empty(&pshm_drv->rx_full_list)) {
+                       spin_unlock_irqrestore(&pshm_drv->lock, flags);
+                       break;
+               }
+
+               pbuf =
+                       list_entry(pshm_drv->rx_full_list.next, struct buf_list,
+                                       list);
+               list_del_init(&pbuf->list);
+
+               /* Retrieve pointer to start of the packet descriptor area. */
+               pck_desc = (struct shm_pck_desc *) pbuf->desc_vptr;
+
+               /*
+                * Check whether descriptor contains a CAIF shared memory
+                * frame.
+                */
+               while (pck_desc->frm_ofs) {
+                       unsigned int frm_buf_ofs;
+                       unsigned int frm_pck_ofs;
+                       unsigned int frm_pck_len;
+                       /*
+                        * Check whether offset is within buffer limits
+                        * (lower).
+                        */
+                       if (pck_desc->frm_ofs <
+                               (pbuf->phy_addr - pshm_drv->shm_base_addr))
+                               break;
+                       /*
+                        * Check whether offset is within buffer limits
+                        * (higher).
+                        */
+                       if (pck_desc->frm_ofs >
+                               ((pbuf->phy_addr - pshm_drv->shm_base_addr) +
+                                       pbuf->len))
+                               break;
+
+                       /* Calculate offset from start of buffer. */
+                       frm_buf_ofs =
+                               pck_desc->frm_ofs - (pbuf->phy_addr -
+                                               pshm_drv->shm_base_addr);
+
+                       /*
+                        * Calculate offset and length of CAIF packet while
+                        * taking care of the shared memory header.
+                        */
+                       frm_pck_ofs =
+                               frm_buf_ofs + SHM_HDR_LEN +
+                               (*(pbuf->desc_vptr + frm_buf_ofs));
+                       frm_pck_len =
+                               (pck_desc->frm_len - SHM_HDR_LEN -
+                               (*(pbuf->desc_vptr + frm_buf_ofs)));
+
+                       /* Check whether CAIF packet is within buffer limits */
+                       if ((frm_pck_ofs + pck_desc->frm_len) > pbuf->len)
+                               break;
+
+                       /* Get a suitable CAIF packet and copy in data. */
+                       skb = netdev_alloc_skb(pshm_drv->pshm_dev->pshm_netdev,
+                                                       frm_pck_len + 1);
+                       if (skb == NULL) {
+                               /* Drop remaining frames if allocation fails */
+                               ++pshm_drv->pshm_dev->pshm_netdev->stats.
+                                                               rx_dropped;
+                               break;
+                       }
+
+                       p = skb_put(skb, frm_pck_len);
+                       memcpy(p, pbuf->desc_vptr + frm_pck_ofs, frm_pck_len);
+
+                       skb->protocol = htons(ETH_P_CAIF);
+                       skb_reset_mac_header(skb);
+                       skb->dev = pshm_drv->pshm_dev->pshm_netdev;
+
+                       /* Push received packet up the stack. */
+                       ret = netif_rx_ni(skb);
+
+                       if (!ret) {
+                               pshm_drv->pshm_dev->pshm_netdev->stats.
+                                                               rx_packets++;
+                               pshm_drv->pshm_dev->pshm_netdev->stats.
+                                               rx_bytes += pck_desc->frm_len;
+                       } else
+                               ++pshm_drv->pshm_dev->pshm_netdev->stats.
+                                                               rx_dropped;
+                       /* Move to next packet descriptor. */
+                       pck_desc++;
+               }
+
+               list_add_tail(&pbuf->list, &pshm_drv->rx_pend_list);
+
+               spin_unlock_irqrestore(&pshm_drv->lock, flags);
+
+       }
+
+       /* Schedule the work queue if required. */
+       if (!work_pending(&pshm_drv->shm_tx_work))
+               queue_work(pshm_drv->pshm_tx_workqueue, &pshm_drv->shm_tx_work);
+
+}
+
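+/*
+ * Tx work function: return processed Rx buffers to the modem, pack
+ * queued skbs into free Tx buffers (several CAIF frames per buffer)
+ * and signal the modem through the mailbox.
+ */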
+static void shm_tx_work_func(struct work_struct *tx_work)
+{
+       u32 mbox_msg;
+       unsigned int frmlen, avail_emptybuff, append = 0;
+       unsigned long flags = 0;
+       struct buf_list *pbuf = NULL;
+       struct shmdrv_layer *pshm_drv;
+       struct shm_caif_frm *frm;
+       struct sk_buff *skb;
+       struct shm_pck_desc *pck_desc;
+       struct list_head *pos;
+
+       pshm_drv = container_of(tx_work, struct shmdrv_layer, shm_tx_work);
+
+       do {
+               /* Initialize mailbox message. */
+               mbox_msg = 0x00;
+               avail_emptybuff = 0;
+
+               spin_lock_irqsave(&pshm_drv->lock, flags);
+
+               /* Check for pending receive buffers. */
+               if (!list_empty(&pshm_drv->rx_pend_list)) {
+
+                       pbuf = list_entry(pshm_drv->rx_pend_list.next,
+                                               struct buf_list, list);
+
+                       list_del_init(&pbuf->list);
+                       list_add_tail(&pbuf->list, &pshm_drv->rx_empty_list);
+                       /*
+                        * Value index is never changed,
+                        * so read access should be safe.
+                        */
+                       mbox_msg |= SHM_SET_EMPTY(pbuf->index);
+               }
+
+               skb = skb_peek(&pshm_drv->sk_qhead);
+
+               if (skb == NULL)
+                       goto send_msg;
+
+               /* Check the available no. of buffers in the empty list */
+               list_for_each(pos, &pshm_drv->tx_empty_list)
+                       avail_emptybuff++;
+
+               if ((avail_emptybuff < LOW_WATERMARK) &&
+                                       pshm_drv->tx_empty_available) {
+                       /* Update blocking condition. */
+                       pshm_drv->tx_empty_available = 0;
+                       pshm_drv->cfdev.flowctrl
+                                       (pshm_drv->pshm_dev->pshm_netdev,
+                                       CAIF_FLOW_OFF);
+               }
+               /*
+                * We simply return back to the caller if we do not have space
+                * either in Tx pending list or Tx empty list. In this case,
+                * we hold the received skb in the skb list, waiting to
+                * be transmitted once Tx buffers become available
+                */
+               if (list_empty(&pshm_drv->tx_empty_list))
+                       goto send_msg;
+
+               /* Get the first free Tx buffer. */
+               pbuf = list_entry(pshm_drv->tx_empty_list.next,
+                                               struct buf_list, list);
+               do {
+                       if (append) {
+                               skb = skb_peek(&pshm_drv->sk_qhead);
+                               if (skb == NULL)
+                                       break;
+                       }
+
+                       frm = (struct shm_caif_frm *)
+                                       (pbuf->desc_vptr + pbuf->frm_ofs);
+
+                       frm->hdr_ofs = 0;
+                       frmlen = 0;
+                       frmlen += SHM_HDR_LEN + frm->hdr_ofs + skb->len;
+
+                       /* Add tail padding if needed. */
+                       if (frmlen % SHM_FRM_PAD_LEN)
+                               frmlen += SHM_FRM_PAD_LEN -
+                                               (frmlen % SHM_FRM_PAD_LEN);
+
+                       /*
+                        * Verify that packet, header and additional padding
+                        * can fit within the buffer frame area.
+                        */
+                       if (frmlen >= (pbuf->len - pbuf->frm_ofs))
+                               break;
+
+                       if (!append) {
+                               list_del_init(&pbuf->list);
+                               append = 1;
+                       }
+
+                       skb = skb_dequeue(&pshm_drv->sk_qhead);
+                       /* Copy in CAIF frame. */
+                       skb_copy_bits(skb, 0, pbuf->desc_vptr +
+                                       pbuf->frm_ofs + SHM_HDR_LEN +
+                                               frm->hdr_ofs, skb->len);
+
+                       pshm_drv->pshm_dev->pshm_netdev->stats.tx_packets++;
+                       pshm_drv->pshm_dev->pshm_netdev->stats.tx_bytes +=
+                                                                       frmlen;
+                       dev_kfree_skb(skb);
+
+                       /* Fill in the shared memory packet descriptor area. */
+                       pck_desc = (struct shm_pck_desc *) (pbuf->desc_vptr);
+                       /* Forward to current frame. */
+                       pck_desc += pbuf->frames;
+                       pck_desc->frm_ofs = (pbuf->phy_addr -
+                                               pshm_drv->shm_base_addr) +
+                                                               pbuf->frm_ofs;
+                       pck_desc->frm_len = frmlen;
+                       /* Terminate packet descriptor area. */
+                       pck_desc++;
+                       pck_desc->frm_ofs = 0;
+                       /* Update buffer parameters. */
+                       pbuf->frames++;
+                       pbuf->frm_ofs += frmlen + (frmlen % 32);
+
+               } while (pbuf->frames < SHM_MAX_FRMS_PER_BUF);
+
+               /* Assign buffer as full. */
+               list_add_tail(&pbuf->list, &pshm_drv->tx_full_list);
+               append = 0;
+               mbox_msg |= SHM_SET_FULL(pbuf->index);
+send_msg:
+               spin_unlock_irqrestore(&pshm_drv->lock, flags);
+
+               if (mbox_msg)
+                       pshm_drv->pshm_dev->pshmdev_mbxsend
+                                       (pshm_drv->pshm_dev->shm_id, mbox_msg);
+       } while (mbox_msg);
+}
+
+static int shm_netdev_tx(struct sk_buff *skb, struct net_device *shm_netdev)
+{
+       struct shmdrv_layer *pshm_drv;
+       unsigned long flags = 0;
+
+       pshm_drv = netdev_priv(shm_netdev);
+
+       spin_lock_irqsave(&pshm_drv->lock, flags);
+
+       skb_queue_tail(&pshm_drv->sk_qhead, skb);
+
+       spin_unlock_irqrestore(&pshm_drv->lock, flags);
+
+       /* Schedule the Tx work queue for deferred processing of skbs. */
+       if (!work_pending(&pshm_drv->shm_tx_work))
+               queue_work(pshm_drv->pshm_tx_workqueue, &pshm_drv->shm_tx_work);
+
+       return 0;
+}
+
+static const struct net_device_ops netdev_ops = {
+       .ndo_open = shm_netdev_open,
+       .ndo_stop = shm_netdev_close,
+       .ndo_start_xmit = shm_netdev_tx,
+};
+
+static void shm_netdev_setup(struct net_device *pshm_netdev)
+{
+       struct shmdrv_layer *pshm_drv;
+       pshm_netdev->netdev_ops = &netdev_ops;
+
+       pshm_netdev->mtu = CAIF_MAX_MTU;
+       pshm_netdev->type = ARPHRD_CAIF;
+       pshm_netdev->hard_header_len = CAIF_NEEDED_HEADROOM;
+       pshm_netdev->tx_queue_len = 0;
+       pshm_netdev->destructor = free_netdev;
+
+       pshm_drv = netdev_priv(pshm_netdev);
+
+       /* Initialize structures in a clean state. */
+       memset(pshm_drv, 0, sizeof(struct shmdrv_layer));
+
+       pshm_drv->cfdev.link_select = CAIF_LINK_LOW_LATENCY;
+}
+
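+/*
+ * Probe the shared memory device: allocate the CAIF netdevice, set up
+ * the mailbox callback, carve the shared memory area into Tx and Rx
+ * buffers and create the Tx/Rx work queues.
+ */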
+int caif_shmcore_probe(struct shmdev_layer *pshm_dev)
+{
+       int result, j;
+       struct shmdrv_layer *pshm_drv = NULL;
+
+       pshm_dev->pshm_netdev = alloc_netdev(sizeof(struct shmdrv_layer),
+                                               "cfshm%d", shm_netdev_setup);
+       if (!pshm_dev->pshm_netdev)
+               return -ENOMEM;
+
+       pshm_drv = netdev_priv(pshm_dev->pshm_netdev);
+       pshm_drv->pshm_dev = pshm_dev;
+
+       /*
+        * Initialization starts with the verification of the
+        * availability of MBX driver by calling its setup function.
+        * MBX driver must be available by this time for proper
+        * functioning of SHM driver.
+        */
+       if ((pshm_dev->pshmdev_mbxsetup
+                               (caif_shmdrv_rx_cb, pshm_dev, pshm_drv)) != 0) {
+               pr_warn("Could not configure SHM mailbox,"
+                               " Bailing out ...\n");
+               free_netdev(pshm_dev->pshm_netdev);
+               return -ENODEV;
+       }
+
+       skb_queue_head_init(&pshm_drv->sk_qhead);
+
+       pr_info("SHM DEVICE[%d] PROBED BY DRIVER, NEW SHM DRIVER"
+                       " INSTANCE AT pshm_drv = 0x%p\n",
+                       pshm_drv->pshm_dev->shm_id, pshm_drv);
+
+       if (pshm_dev->shm_total_sz <
+                       (NR_TX_BUF * TX_BUF_SZ + NR_RX_BUF * RX_BUF_SZ)) {
+
+               pr_warn("ERROR, Amount of available"
+                               " Phys. SHM cannot accommodate current SHM "
+                               "driver configuration, Bailing out ...\n");
+               free_netdev(pshm_dev->pshm_netdev);
+               return -ENOMEM;
+       }
+
+       pshm_drv->shm_base_addr = pshm_dev->shm_base_addr;
+       pshm_drv->shm_tx_addr = pshm_drv->shm_base_addr;
+
+       if (pshm_dev->shm_loopback)
+               pshm_drv->shm_rx_addr = pshm_drv->shm_tx_addr;
+       else
+               pshm_drv->shm_rx_addr = pshm_dev->shm_base_addr +
+                                               (NR_TX_BUF * TX_BUF_SZ);
+
+       INIT_LIST_HEAD(&pshm_drv->tx_empty_list);
+       INIT_LIST_HEAD(&pshm_drv->tx_pend_list);
+       INIT_LIST_HEAD(&pshm_drv->tx_full_list);
+
+       INIT_LIST_HEAD(&pshm_drv->rx_empty_list);
+       INIT_LIST_HEAD(&pshm_drv->rx_pend_list);
+       INIT_LIST_HEAD(&pshm_drv->rx_full_list);
+
+       INIT_WORK(&pshm_drv->shm_tx_work, shm_tx_work_func);
+       INIT_WORK(&pshm_drv->shm_rx_work, shm_rx_work_func);
+
+       pshm_drv->pshm_tx_workqueue =
+                               create_singlethread_workqueue("shm_tx_work");
+       pshm_drv->pshm_rx_workqueue =
+                               create_singlethread_workqueue("shm_rx_work");
+
+       for (j = 0; j < NR_TX_BUF; j++) {
+               struct buf_list *tx_buf =
+                               kmalloc(sizeof(struct buf_list), GFP_KERNEL);
+
+               if (tx_buf == NULL) {
+                       pr_warn("ERROR, Could not"
+                                       " allocate dynamic mem. for tx_buf,"
+                                       " Bailing out ...\n");
+                       free_netdev(pshm_dev->pshm_netdev);
+                       return -ENOMEM;
+               }
+               tx_buf->index = j;
+               tx_buf->phy_addr = pshm_drv->shm_tx_addr + (TX_BUF_SZ * j);
+               tx_buf->len = TX_BUF_SZ;
+               tx_buf->frames = 0;
+               tx_buf->frm_ofs = SHM_CAIF_FRM_OFS;
+
+               if (pshm_dev->shm_loopback)
+                       tx_buf->desc_vptr = (char *)tx_buf->phy_addr;
+               else
+                       tx_buf->desc_vptr =
+                                       ioremap(tx_buf->phy_addr, TX_BUF_SZ);
+
+               list_add_tail(&tx_buf->list, &pshm_drv->tx_empty_list);
+       }
+
+       for (j = 0; j < NR_RX_BUF; j++) {
+               struct buf_list *rx_buf =
+                               kmalloc(sizeof(struct buf_list), GFP_KERNEL);
+
+               if (rx_buf == NULL) {
+                       pr_warn("ERROR, Could not"
+                                       " allocate dynamic mem. for rx_buf,"
+                                       " Bailing out ...\n");
+                       free_netdev(pshm_dev->pshm_netdev);
+                       return -ENOMEM;
+               }
+               rx_buf->index = j;
+               rx_buf->phy_addr = pshm_drv->shm_rx_addr + (RX_BUF_SZ * j);
+               rx_buf->len = RX_BUF_SZ;
+
+               if (pshm_dev->shm_loopback)
+                       rx_buf->desc_vptr = (char *)rx_buf->phy_addr;
+               else
+                       rx_buf->desc_vptr =
+                                       ioremap(rx_buf->phy_addr, RX_BUF_SZ);
+               list_add_tail(&rx_buf->list, &pshm_drv->rx_empty_list);
+       }
+
+       pshm_drv->tx_empty_available = 1;
+       result = register_netdev(pshm_dev->pshm_netdev);
+       if (result)
+               pr_warn("ERROR[%d], SHM could not register with "
+                       "NW FRMWK, Bailing out ...\n", result);
+
+       return result;
+}
+
+void caif_shmcore_remove(struct net_device *pshm_netdev)
+{
+       struct buf_list *pbuf;
+       struct shmdrv_layer *pshm_drv = NULL;
+
+       pshm_drv = netdev_priv(pshm_netdev);
+
+       while (!(list_empty(&pshm_drv->tx_pend_list))) {
+               pbuf =
+                       list_entry(pshm_drv->tx_pend_list.next,
+                                       struct buf_list, list);
+
+               list_del(&pbuf->list);
+               kfree(pbuf);
+       }
+
+       while (!(list_empty(&pshm_drv->tx_full_list))) {
+               pbuf =
+                       list_entry(pshm_drv->tx_full_list.next,
+                                       struct buf_list, list);
+               list_del(&pbuf->list);
+               kfree(pbuf);
+       }
+
+       while (!(list_empty(&pshm_drv->tx_empty_list))) {
+               pbuf =
+                       list_entry(pshm_drv->tx_empty_list.next,
+                                       struct buf_list, list);
+               list_del(&pbuf->list);
+               kfree(pbuf);
+       }
+
+       while (!(list_empty(&pshm_drv->rx_full_list))) {
+               pbuf =
+                       list_entry(pshm_drv->rx_full_list.next,
+                               struct buf_list, list);
+               list_del(&pbuf->list);
+               kfree(pbuf);
+       }
+
+       while (!(list_empty(&pshm_drv->rx_pend_list))) {
+               pbuf =
+                       list_entry(pshm_drv->rx_pend_list.next,
+                               struct buf_list, list);
+               list_del(&pbuf->list);
+               kfree(pbuf);
+       }
+
+       while (!(list_empty(&pshm_drv->rx_empty_list))) {
+               pbuf =
+                       list_entry(pshm_drv->rx_empty_list.next,
+                               struct buf_list, list);
+               list_del(&pbuf->list);
+               kfree(pbuf);
+       }
+
+       /* Destroy work queues. */
+       destroy_workqueue(pshm_drv->pshm_tx_workqueue);
+       destroy_workqueue(pshm_drv->pshm_rx_workqueue);
+
+       unregister_netdev(pshm_netdev);
+}
index 9d9e453..080574b 100644 (file)
@@ -82,6 +82,14 @@ config CAN_FLEXCAN
        ---help---
          Say Y here if you want to support for Freescale FlexCAN.
 
+config PCH_CAN
+       tristate "PCH CAN"
+       depends on CAN_DEV && PCI
+       ---help---
+         This driver is for the PCH CAN controller of Topcliff, an I/O
+         Hub (IOH) for x86 embedded processors, and provides access to
+         the CAN bus.
+
 source "drivers/net/can/mscan/Kconfig"
 
 source "drivers/net/can/sja1000/Kconfig"
index 0057537..90af15a 100644 (file)
@@ -17,5 +17,6 @@ obj-$(CONFIG_CAN_MCP251X)     += mcp251x.o
 obj-$(CONFIG_CAN_BFIN)         += bfin_can.o
 obj-$(CONFIG_CAN_JANZ_ICAN3)   += janz-ican3.o
 obj-$(CONFIG_CAN_FLEXCAN)      += flexcan.o
+obj-$(CONFIG_PCH_CAN)          += pch_can.o
 
 ccflags-$(CONFIG_CAN_DEBUG_DEVICES) := -DDEBUG
index 2d8bd86..cee98fa 100644 (file)
@@ -2,7 +2,7 @@
  * at91_can.c - CAN network driver for AT91 SoC CAN controller
  *
  * (C) 2007 by Hans J. Koch <hjk@linutronix.de>
- * (C) 2008, 2009 by Marc Kleine-Budde <kernel@pengutronix.de>
+ * (C) 2008, 2009, 2010 by Marc Kleine-Budde <kernel@pengutronix.de>
  *
  * This software may be distributed under the terms of the GNU General
  * Public License ("GPL") version 2 as distributed in the 'COPYING'
@@ -40,7 +40,6 @@
 
 #include <mach/board.h>
 
-#define DRV_NAME               "at91_can"
 #define AT91_NAPI_WEIGHT       12
 
 /*
@@ -172,6 +171,7 @@ struct at91_priv {
 };
 
 static struct can_bittiming_const at91_bittiming_const = {
+       .name           = KBUILD_MODNAME,
        .tseg1_min      = 4,
        .tseg1_max      = 16,
        .tseg2_min      = 2,
@@ -199,13 +199,13 @@ static inline int get_tx_echo_mb(const struct at91_priv *priv)
 
 static inline u32 at91_read(const struct at91_priv *priv, enum at91_reg reg)
 {
-       return readl(priv->reg_base + reg);
+       return __raw_readl(priv->reg_base + reg);
 }
 
 static inline void at91_write(const struct at91_priv *priv, enum at91_reg reg,
                u32 value)
 {
-       writel(value, priv->reg_base + reg);
+       __raw_writel(value, priv->reg_base + reg);
 }
 
 static inline void set_mb_mode_prio(const struct at91_priv *priv,
@@ -243,6 +243,12 @@ static void at91_setup_mailboxes(struct net_device *dev)
                set_mb_mode(priv, i, AT91_MB_MODE_RX);
        set_mb_mode(priv, AT91_MB_RX_LAST, AT91_MB_MODE_RX_OVRWR);
 
+       /* reset acceptance mask and id register */
+       for (i = AT91_MB_RX_FIRST; i <= AT91_MB_RX_LAST; i++) {
+               at91_write(priv, AT91_MAM(i), 0x0 );
+               at91_write(priv, AT91_MAM(i), 0x0);
+       }
+
        /* The last 4 mailboxes are used for transmitting. */
        for (i = AT91_MB_TX_FIRST; i <= AT91_MB_TX_LAST; i++)
                set_mb_mode_prio(priv, i, AT91_MB_MODE_TX, 0);
@@ -257,18 +263,30 @@ static int at91_set_bittiming(struct net_device *dev)
        const struct can_bittiming *bt = &priv->can.bittiming;
        u32 reg_br;
 
-       reg_br = ((priv->can.ctrlmode & CAN_CTRLMODE_3_SAMPLES) << 24) |
-               ((bt->brp - 1) << 16) | ((bt->sjw - 1) << 12) |
+       reg_br = ((priv->can.ctrlmode & CAN_CTRLMODE_3_SAMPLES) ? 1 << 24 : 0) |
+               ((bt->brp - 1) << 16) | ((bt->sjw - 1) << 12) |
                ((bt->prop_seg - 1) << 8) | ((bt->phase_seg1 - 1) << 4) |
                ((bt->phase_seg2 - 1) << 0);
 
-       dev_info(dev->dev.parent, "writing AT91_BR: 0x%08x\n", reg_br);
+       netdev_info(dev, "writing AT91_BR: 0x%08x\n", reg_br);
 
        at91_write(priv, AT91_BR, reg_br);
 
        return 0;
 }
 
+static int at91_get_berr_counter(const struct net_device *dev,
+               struct can_berr_counter *bec)
+{
+       const struct at91_priv *priv = netdev_priv(dev);
+       u32 reg_ecr = at91_read(priv, AT91_ECR);
+
+       bec->rxerr = reg_ecr & 0xff;
+       bec->txerr = reg_ecr >> 16;
+
+       return 0;
+}
+
 static void at91_chip_start(struct net_device *dev)
 {
        struct at91_priv *priv = netdev_priv(dev);
@@ -281,6 +299,7 @@ static void at91_chip_start(struct net_device *dev)
        reg_mr = at91_read(priv, AT91_MR);
        at91_write(priv, AT91_MR, reg_mr & ~AT91_MR_CANEN);
 
+       at91_set_bittiming(dev);
        at91_setup_mailboxes(dev);
        at91_transceiver_switch(priv, 1);
 
@@ -350,8 +369,7 @@ static netdev_tx_t at91_start_xmit(struct sk_buff *skb, struct net_device *dev)
        if (unlikely(!(at91_read(priv, AT91_MSR(mb)) & AT91_MSR_MRDY))) {
                netif_stop_queue(dev);
 
-               dev_err(dev->dev.parent,
-                       "BUG! TX buffer full when queue awake!\n");
+               netdev_err(dev, "BUG! TX buffer full when queue awake!\n");
                return NETDEV_TX_BUSY;
        }
 
@@ -435,7 +453,7 @@ static void at91_rx_overflow_err(struct net_device *dev)
        struct sk_buff *skb;
        struct can_frame *cf;
 
-       dev_dbg(dev->dev.parent, "RX buffer overflow\n");
+       netdev_dbg(dev, "RX buffer overflow\n");
        stats->rx_over_errors++;
        stats->rx_errors++;
 
@@ -480,6 +498,9 @@ static void at91_read_mb(struct net_device *dev, unsigned int mb,
        *(u32 *)(cf->data + 0) = at91_read(priv, AT91_MDL(mb));
        *(u32 *)(cf->data + 4) = at91_read(priv, AT91_MDH(mb));
 
+       /* allow RX of extended frames */
+       at91_write(priv, AT91_MID(mb), AT91_MID_MIDE);
+
        if (unlikely(mb == AT91_MB_RX_LAST && reg_msr & AT91_MSR_MMI))
                at91_rx_overflow_err(dev);
 }
@@ -565,8 +586,8 @@ static int at91_poll_rx(struct net_device *dev, int quota)
 
        if (priv->rx_next > AT91_MB_RX_LOW_LAST &&
            reg_sr & AT91_MB_RX_LOW_MASK)
-               dev_info(dev->dev.parent,
-                        "order of incoming frames cannot be guaranteed\n");
+               netdev_info(dev,
+                       "order of incoming frames cannot be guaranteed\n");
 
  again:
        for (mb = find_next_bit(addr, AT91_MB_RX_NUM, priv->rx_next);
@@ -604,7 +625,7 @@ static void at91_poll_err_frame(struct net_device *dev,
 
        /* CRC error */
        if (reg_sr & AT91_IRQ_CERR) {
-               dev_dbg(dev->dev.parent, "CERR irq\n");
+               netdev_dbg(dev, "CERR irq\n");
                dev->stats.rx_errors++;
                priv->can.can_stats.bus_error++;
                cf->can_id |= CAN_ERR_PROT | CAN_ERR_BUSERROR;
@@ -612,7 +633,7 @@ static void at91_poll_err_frame(struct net_device *dev,
 
        /* Stuffing Error */
        if (reg_sr & AT91_IRQ_SERR) {
-               dev_dbg(dev->dev.parent, "SERR irq\n");
+               netdev_dbg(dev, "SERR irq\n");
                dev->stats.rx_errors++;
                priv->can.can_stats.bus_error++;
                cf->can_id |= CAN_ERR_PROT | CAN_ERR_BUSERROR;
@@ -621,14 +642,14 @@ static void at91_poll_err_frame(struct net_device *dev,
 
        /* Acknowledgement Error */
        if (reg_sr & AT91_IRQ_AERR) {
-               dev_dbg(dev->dev.parent, "AERR irq\n");
+               netdev_dbg(dev, "AERR irq\n");
                dev->stats.tx_errors++;
                cf->can_id |= CAN_ERR_ACK;
        }
 
        /* Form error */
        if (reg_sr & AT91_IRQ_FERR) {
-               dev_dbg(dev->dev.parent, "FERR irq\n");
+               netdev_dbg(dev, "FERR irq\n");
                dev->stats.rx_errors++;
                priv->can.can_stats.bus_error++;
                cf->can_id |= CAN_ERR_PROT | CAN_ERR_BUSERROR;
@@ -637,7 +658,7 @@ static void at91_poll_err_frame(struct net_device *dev,
 
        /* Bit Error */
        if (reg_sr & AT91_IRQ_BERR) {
-               dev_dbg(dev->dev.parent, "BERR irq\n");
+               netdev_dbg(dev, "BERR irq\n");
                dev->stats.tx_errors++;
                priv->can.can_stats.bus_error++;
                cf->can_id |= CAN_ERR_PROT | CAN_ERR_BUSERROR;
@@ -755,12 +776,10 @@ static void at91_irq_err_state(struct net_device *dev,
                struct can_frame *cf, enum can_state new_state)
 {
        struct at91_priv *priv = netdev_priv(dev);
-       u32 reg_idr, reg_ier, reg_ecr;
-       u8 tec, rec;
+       u32 reg_idr = 0, reg_ier = 0;
+       struct can_berr_counter bec;
 
-       reg_ecr = at91_read(priv, AT91_ECR);
-       rec = reg_ecr & 0xff;
-       tec = reg_ecr >> 16;
+       at91_get_berr_counter(dev, &bec);
 
        switch (priv->can.state) {
        case CAN_STATE_ERROR_ACTIVE:
@@ -771,11 +790,11 @@ static void at91_irq_err_state(struct net_device *dev,
                 */
                if (new_state >= CAN_STATE_ERROR_WARNING &&
                    new_state <= CAN_STATE_BUS_OFF) {
-                       dev_dbg(dev->dev.parent, "Error Warning IRQ\n");
+                       netdev_dbg(dev, "Error Warning IRQ\n");
                        priv->can.can_stats.error_warning++;
 
                        cf->can_id |= CAN_ERR_CRTL;
-                       cf->data[1] = (tec > rec) ?
+                       cf->data[1] = (bec.txerr > bec.rxerr) ?
                                CAN_ERR_CRTL_TX_WARNING :
                                CAN_ERR_CRTL_RX_WARNING;
                }
@@ -787,11 +806,11 @@ static void at91_irq_err_state(struct net_device *dev,
                 */
                if (new_state >= CAN_STATE_ERROR_PASSIVE &&
                    new_state <= CAN_STATE_BUS_OFF) {
-                       dev_dbg(dev->dev.parent, "Error Passive IRQ\n");
+                       netdev_dbg(dev, "Error Passive IRQ\n");
                        priv->can.can_stats.error_passive++;
 
                        cf->can_id |= CAN_ERR_CRTL;
-                       cf->data[1] = (tec > rec) ?
+                       cf->data[1] = (bec.txerr > bec.rxerr) ?
                                CAN_ERR_CRTL_TX_PASSIVE :
                                CAN_ERR_CRTL_RX_PASSIVE;
                }
@@ -804,7 +823,7 @@ static void at91_irq_err_state(struct net_device *dev,
                if (new_state <= CAN_STATE_ERROR_PASSIVE) {
                        cf->can_id |= CAN_ERR_RESTARTED;
 
-                       dev_dbg(dev->dev.parent, "restarted\n");
+                       netdev_dbg(dev, "restarted\n");
                        priv->can.can_stats.restarts++;
 
                        netif_carrier_on(dev);
@@ -825,7 +844,7 @@ static void at91_irq_err_state(struct net_device *dev,
                 * circumstances. so just enable AT91_IRQ_ERRP, thus
                 * the "fallthrough"
                 */
-               dev_dbg(dev->dev.parent, "Error Active\n");
+               netdev_dbg(dev, "Error Active\n");
                cf->can_id |= CAN_ERR_PROT;
                cf->data[2] = CAN_ERR_PROT_ACTIVE;
        case CAN_STATE_ERROR_WARNING:   /* fallthrough */
@@ -843,7 +862,7 @@ static void at91_irq_err_state(struct net_device *dev,
 
                cf->can_id |= CAN_ERR_BUSOFF;
 
-               dev_dbg(dev->dev.parent, "bus-off\n");
+               netdev_dbg(dev, "bus-off\n");
                netif_carrier_off(dev);
                priv->can.can_stats.bus_off++;
 
@@ -881,7 +900,7 @@ static void at91_irq_err(struct net_device *dev)
        else if (likely(reg_sr & AT91_IRQ_ERRA))
                new_state = CAN_STATE_ERROR_ACTIVE;
        else {
-               dev_err(dev->dev.parent, "BUG! hardware in undefined state\n");
+               netdev_err(dev, "BUG! hardware in undefined state\n");
                return;
        }
 
@@ -1018,7 +1037,7 @@ static const struct net_device_ops at91_netdev_ops = {
        .ndo_start_xmit = at91_start_xmit,
 };
 
-static int __init at91_can_probe(struct platform_device *pdev)
+static int __devinit at91_can_probe(struct platform_device *pdev)
 {
        struct net_device *dev;
        struct at91_priv *priv;
@@ -1067,8 +1086,8 @@ static int __init at91_can_probe(struct platform_device *pdev)
        priv = netdev_priv(dev);
        priv->can.clock.freq = clk_get_rate(clk);
        priv->can.bittiming_const = &at91_bittiming_const;
-       priv->can.do_set_bittiming = at91_set_bittiming;
        priv->can.do_set_mode = at91_set_mode;
+       priv->can.do_get_berr_counter = at91_get_berr_counter;
        priv->can.ctrlmode_supported = CAN_CTRLMODE_3_SAMPLES;
        priv->reg_base = addr;
        priv->dev = dev;
@@ -1092,7 +1111,7 @@ static int __init at91_can_probe(struct platform_device *pdev)
        return 0;
 
  exit_free:
-       free_netdev(dev);
+       free_candev(dev);
  exit_iounmap:
        iounmap(addr);
  exit_release:
@@ -1113,8 +1132,6 @@ static int __devexit at91_can_remove(struct platform_device *pdev)
 
        platform_set_drvdata(pdev, NULL);
 
-       free_netdev(dev);
-
        iounmap(priv->reg_base);
 
        res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
@@ -1122,6 +1139,8 @@ static int __devexit at91_can_remove(struct platform_device *pdev)
 
        clk_put(priv->clk);
 
+       free_candev(dev);
+
        return 0;
 }
 
@@ -1129,21 +1148,19 @@ static struct platform_driver at91_can_driver = {
        .probe          = at91_can_probe,
        .remove         = __devexit_p(at91_can_remove),
        .driver         = {
-               .name   = DRV_NAME,
+               .name   = KBUILD_MODNAME,
                .owner  = THIS_MODULE,
        },
 };
 
 static int __init at91_can_module_init(void)
 {
-       printk(KERN_INFO "%s netdevice driver\n", DRV_NAME);
        return platform_driver_register(&at91_can_driver);
 }
 
 static void __exit at91_can_module_exit(void)
 {
        platform_driver_unregister(&at91_can_driver);
-       printk(KERN_INFO "%s: driver removed\n", DRV_NAME);
 }
 
 module_init(at91_can_module_init);
@@ -1151,4 +1168,4 @@ module_exit(at91_can_module_exit);
 
 MODULE_AUTHOR("Marc Kleine-Budde <mkl@pengutronix.de>");
 MODULE_LICENSE("GPL v2");
-MODULE_DESCRIPTION(DRV_NAME " CAN netdevice driver");
+MODULE_DESCRIPTION(KBUILD_MODNAME " CAN netdevice driver");
index ef443a0..d499056 100644 (file)
@@ -992,7 +992,6 @@ static int __devexit flexcan_remove(struct platform_device *pdev)
 
        unregister_flexcandev(dev);
        platform_set_drvdata(pdev, NULL);
-       free_candev(dev);
        iounmap(priv->base);
 
        mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
@@ -1000,6 +999,8 @@ static int __devexit flexcan_remove(struct platform_device *pdev)
 
        clk_put(priv->clk);
 
+       free_candev(dev);
+
        return 0;
 }
 
index 6aadc3e..7ab534a 100644 (file)
 #  define RXBSIDH_SHIFT 3
 #define RXBSIDL(n)  (((n) * 0x10) + 0x60 + RXBSIDL_OFF)
 #  define RXBSIDL_IDE   0x08
+#  define RXBSIDL_SRR   0x10
 #  define RXBSIDL_EID   3
 #  define RXBSIDL_SHIFT 5
 #define RXBEID8(n)  (((n) * 0x10) + 0x60 + RXBEID8_OFF)
@@ -475,6 +476,8 @@ static void mcp251x_hw_rx(struct spi_device *spi, int buf_idx)
                frame->can_id =
                        (buf[RXBSIDH_OFF] << RXBSIDH_SHIFT) |
                        (buf[RXBSIDL_OFF] >> RXBSIDL_SHIFT);
+               if (buf[RXBSIDL_OFF] & RXBSIDL_SRR)
+                       frame->can_id |= CAN_RTR_FLAG;
        }
        /* Data length */
        frame->can_dlc = get_can_dlc(buf[RXBDLC_OFF] & RXBDLC_LEN_MASK);
diff --git a/drivers/net/can/pch_can.c b/drivers/net/can/pch_can.c
new file mode 100644 (file)
index 0000000..55ec324
--- /dev/null
@@ -0,0 +1,1463 @@
+/*
+ * Copyright (C) 1999 - 2010 Intel Corporation.
+ * Copyright (C) 2010 OKI SEMICONDUCTOR Co., LTD.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307, USA.
+ */
+
+#include <linux/interrupt.h>
+#include <linux/delay.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/sched.h>
+#include <linux/pci.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/errno.h>
+#include <linux/netdevice.h>
+#include <linux/skbuff.h>
+#include <linux/can.h>
+#include <linux/can/dev.h>
+#include <linux/can/error.h>
+
+#define MAX_MSG_OBJ            32
+#define MSG_OBJ_RX             0 /* The receive message object flag. */
+#define MSG_OBJ_TX             1 /* The transmit message object flag. */
+
+#define ENABLE                 1 /* The enable flag */
+#define DISABLE                        0 /* The disable flag */
+#define CAN_CTRL_INIT          0x0001 /* The INIT bit of CANCONT register. */
+#define CAN_CTRL_IE            0x0002 /* The IE bit of CAN control register */
+#define CAN_CTRL_IE_SIE_EIE    0x000e
+#define CAN_CTRL_CCE           0x0040
+#define CAN_CTRL_OPT           0x0080 /* The OPT bit of CANCONT register. */
+#define CAN_OPT_SILENT         0x0008 /* The Silent bit of CANOPT reg. */
+#define CAN_OPT_LBACK          0x0010 /* The LoopBack bit of CANOPT reg. */
+#define CAN_CMASK_RX_TX_SET    0x00f3
+#define CAN_CMASK_RX_TX_GET    0x0073
+#define CAN_CMASK_ALL          0xff
+#define CAN_CMASK_RDWR         0x80
+#define CAN_CMASK_ARB          0x20
+#define CAN_CMASK_CTRL         0x10
+#define CAN_CMASK_MASK         0x40
+#define CAN_CMASK_NEWDAT       0x04
+#define CAN_CMASK_CLRINTPND    0x08
+
+#define CAN_IF_MCONT_NEWDAT    0x8000
+#define CAN_IF_MCONT_INTPND    0x2000
+#define CAN_IF_MCONT_UMASK     0x1000
+#define CAN_IF_MCONT_TXIE      0x0800
+#define CAN_IF_MCONT_RXIE      0x0400
+#define CAN_IF_MCONT_RMTEN     0x0200
+#define CAN_IF_MCONT_TXRQXT    0x0100
+#define CAN_IF_MCONT_EOB       0x0080
+#define CAN_IF_MCONT_DLC       0x000f
+#define CAN_IF_MCONT_MSGLOST   0x4000
+#define CAN_MASK2_MDIR_MXTD    0xc000
+#define CAN_ID2_DIR            0x2000
+#define CAN_ID_MSGVAL          0x8000
+
+#define CAN_STATUS_INT         0x8000
+#define CAN_IF_CREQ_BUSY       0x8000
+#define CAN_ID2_XTD            0x4000
+
+#define CAN_REC                        0x00007f00
+#define CAN_TEC                        0x000000ff
+
+#define PCH_RX_OK              0x00000010
+#define PCH_TX_OK              0x00000008
+#define PCH_BUS_OFF            0x00000080
+#define PCH_EWARN              0x00000040
+#define PCH_EPASSIV            0x00000020
+#define PCH_LEC0               0x00000001
+#define PCH_LEC1               0x00000002
+#define PCH_LEC2               0x00000004
+#define PCH_LEC_ALL            (PCH_LEC0 | PCH_LEC1 | PCH_LEC2)
+#define PCH_STUF_ERR           PCH_LEC0
+#define PCH_FORM_ERR           PCH_LEC1
+#define PCH_ACK_ERR            (PCH_LEC0 | PCH_LEC1)
+#define PCH_BIT1_ERR           PCH_LEC2
+#define PCH_BIT0_ERR           (PCH_LEC0 | PCH_LEC2)
+#define PCH_CRC_ERR            (PCH_LEC1 | PCH_LEC2)
+
+/* bit position of certain controller bits. */
+#define BIT_BITT_BRP           0
+#define BIT_BITT_SJW           6
+#define BIT_BITT_TSEG1         8
+#define BIT_BITT_TSEG2         12
+#define BIT_IF1_MCONT_RXIE     10
+#define BIT_IF2_MCONT_TXIE     11
+#define BIT_BRPE_BRPE          6
+#define BIT_ES_TXERRCNT                0
+#define BIT_ES_RXERRCNT                8
+#define MSK_BITT_BRP           0x3f
+#define MSK_BITT_SJW           0xc0
+#define MSK_BITT_TSEG1         0xf00
+#define MSK_BITT_TSEG2         0x7000
+#define MSK_BRPE_BRPE          0x3c0
+#define MSK_BRPE_GET           0x0f
+#define MSK_CTRL_IE_SIE_EIE    0x07
+#define MSK_MCONT_TXIE         0x08
+#define MSK_MCONT_RXIE         0x10
+#define PCH_CAN_NO_TX_BUFF     1
+#define COUNTER_LIMIT          10
+
+#define PCH_CAN_CLK            50000000        /* 50MHz */
+
+/* Define the number of message objects.
+ * PCH CAN communications are done via Message RAM.
+ * The Message RAM consists of 32 message objects. */
+#define PCH_RX_OBJ_NUM         26 /* Objects 1..PCH_RX_OBJ_NUM are Rx */
+#define PCH_TX_OBJ_NUM         6  /* The remaining objects are Tx */
+#define PCH_OBJ_NUM            (PCH_TX_OBJ_NUM + PCH_RX_OBJ_NUM)
+
+#define PCH_FIFO_THRESH                16
+
+enum pch_can_mode {
+       PCH_CAN_ENABLE,
+       PCH_CAN_DISABLE,
+       PCH_CAN_ALL,
+       PCH_CAN_NONE,
+       PCH_CAN_STOP,
+       PCH_CAN_RUN
+};
+
+struct pch_can_regs {
+       u32 cont;
+       u32 stat;
+       u32 errc;
+       u32 bitt;
+       u32 intr;
+       u32 opt;
+       u32 brpe;
+       u32 reserve1;
+       u32 if1_creq;
+       u32 if1_cmask;
+       u32 if1_mask1;
+       u32 if1_mask2;
+       u32 if1_id1;
+       u32 if1_id2;
+       u32 if1_mcont;
+       u32 if1_dataa1;
+       u32 if1_dataa2;
+       u32 if1_datab1;
+       u32 if1_datab2;
+       u32 reserve2;
+       u32 reserve3[12];
+       u32 if2_creq;
+       u32 if2_cmask;
+       u32 if2_mask1;
+       u32 if2_mask2;
+       u32 if2_id1;
+       u32 if2_id2;
+       u32 if2_mcont;
+       u32 if2_dataa1;
+       u32 if2_dataa2;
+       u32 if2_datab1;
+       u32 if2_datab2;
+       u32 reserve4;
+       u32 reserve5[20];
+       u32 treq1;
+       u32 treq2;
+       u32 reserve6[2];
+       u32 reserve7[56];
+       u32 reserve8[3];
+       u32 srst;
+};
+
+struct pch_can_priv {
+       struct can_priv can;
+       unsigned int can_num;
+       struct pci_dev *dev;
+       unsigned int tx_enable[MAX_MSG_OBJ];
+       unsigned int rx_enable[MAX_MSG_OBJ];
+       unsigned int rx_link[MAX_MSG_OBJ];
+       unsigned int int_enables;
+       unsigned int int_stat;
+       struct net_device *ndev;
+       spinlock_t msgif_reg_lock; /* Message Interface Registers Access Lock*/
+       unsigned int msg_obj[MAX_MSG_OBJ];
+       struct pch_can_regs __iomem *regs;
+       struct napi_struct napi;
+       unsigned int tx_obj;    /* Point next Tx Obj index */
+       unsigned int use_msi;
+};
+
+static struct can_bittiming_const pch_can_bittiming_const = {
+       .name = KBUILD_MODNAME,
+       .tseg1_min = 1,
+       .tseg1_max = 16,
+       .tseg2_min = 1,
+       .tseg2_max = 8,
+       .sjw_max = 4,
+       .brp_min = 1,
+       .brp_max = 1024, /* 6bit + extended 4bit */
+       .brp_inc = 1,
+};
+
+static DEFINE_PCI_DEVICE_TABLE(pch_pci_tbl) = {
+       {PCI_VENDOR_ID_INTEL, 0x8818, PCI_ANY_ID, PCI_ANY_ID,},
+       {0,}
+};
+MODULE_DEVICE_TABLE(pci, pch_pci_tbl);
+
+static inline void pch_can_bit_set(u32 *addr, u32 mask)
+{
+       iowrite32(ioread32(addr) | mask, addr);
+}
+
+static inline void pch_can_bit_clear(u32 *addr, u32 mask)
+{
+       iowrite32(ioread32(addr) & ~mask, addr);
+}
+
+static void pch_can_set_run_mode(struct pch_can_priv *priv,
+                                enum pch_can_mode mode)
+{
+       switch (mode) {
+       case PCH_CAN_RUN:
+               pch_can_bit_clear(&priv->regs->cont, CAN_CTRL_INIT);
+               break;
+
+       case PCH_CAN_STOP:
+               pch_can_bit_set(&priv->regs->cont, CAN_CTRL_INIT);
+               break;
+
+       default:
+               dev_err(&priv->ndev->dev, "%s -> Invalid Mode.\n", __func__);
+               break;
+       }
+}
+
+static void pch_can_set_optmode(struct pch_can_priv *priv)
+{
+       u32 reg_val = ioread32(&priv->regs->opt);
+
+       if (priv->can.ctrlmode & CAN_CTRLMODE_LISTENONLY)
+               reg_val |= CAN_OPT_SILENT;
+
+       if (priv->can.ctrlmode & CAN_CTRLMODE_LOOPBACK)
+               reg_val |= CAN_OPT_LBACK;
+
+       pch_can_bit_set(&priv->regs->cont, CAN_CTRL_OPT);
+       iowrite32(reg_val, &priv->regs->opt);
+}
+
+static void pch_can_set_int_custom(struct pch_can_priv *priv)
+{
+       /* Clearing the IE, SIE and EIE bits of Can control register. */
+       pch_can_bit_clear(&priv->regs->cont, CAN_CTRL_IE_SIE_EIE);
+
+       /* Appropriately setting them. */
+       pch_can_bit_set(&priv->regs->cont,
+                       ((priv->int_enables & MSK_CTRL_IE_SIE_EIE) << 1));
+}
+
+/* This function retrieves the enabled interrupts for the CAN device. */
+static void pch_can_get_int_enables(struct pch_can_priv *priv, u32 *enables)
+{
+       /* Obtaining the status of IE, SIE and EIE interrupt bits. */
+       *enables = ((ioread32(&priv->regs->cont) & CAN_CTRL_IE_SIE_EIE) >> 1);
+}
+
+static void pch_can_set_int_enables(struct pch_can_priv *priv,
+                                   enum pch_can_mode interrupt_no)
+{
+       switch (interrupt_no) {
+       case PCH_CAN_ENABLE:
+               pch_can_bit_set(&priv->regs->cont, CAN_CTRL_IE);
+               break;
+
+       case PCH_CAN_DISABLE:
+               pch_can_bit_clear(&priv->regs->cont, CAN_CTRL_IE);
+               break;
+
+       case PCH_CAN_ALL:
+               pch_can_bit_set(&priv->regs->cont, CAN_CTRL_IE_SIE_EIE);
+               break;
+
+       case PCH_CAN_NONE:
+               pch_can_bit_clear(&priv->regs->cont, CAN_CTRL_IE_SIE_EIE);
+               break;
+
+       default:
+               dev_err(&priv->ndev->dev, "Invalid interrupt number.\n");
+               break;
+       }
+}
+
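+/*
+ * Write a message object number to the given CREQ register and wait,
+ * for at most COUNTER_LIMIT microseconds, for the BUSY flag to clear.
+ */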
+static void pch_can_check_if_busy(u32 __iomem *creq_addr, u32 num)
+{
+       u32 counter = COUNTER_LIMIT;
+       u32 ifx_creq;
+
+       iowrite32(num, creq_addr);
+       while (counter) {
+               ifx_creq = ioread32(creq_addr) & CAN_IF_CREQ_BUSY;
+               if (!ifx_creq)
+                       break;
+               counter--;
+               udelay(1);
+       }
+       if (!counter)
+               pr_err("%s: message interface BUSY flag did not clear.\n",
+                      __func__);
+}
+
+static void pch_can_set_rx_enable(struct pch_can_priv *priv, u32 buff_num,
+                                 u32 set)
+{
+       unsigned long flags;
+
+       spin_lock_irqsave(&priv->msgif_reg_lock, flags);
+       /* Reading the receive buffer data from RAM to Interface1 registers */
+       iowrite32(CAN_CMASK_RX_TX_GET, &priv->regs->if1_cmask);
+       pch_can_check_if_busy(&priv->regs->if1_creq, buff_num);
+
+       /* Setting the IF1MASK1 register to access MsgVal and RxIE bits */
+       iowrite32(CAN_CMASK_RDWR | CAN_CMASK_ARB | CAN_CMASK_CTRL,
+                 &priv->regs->if1_cmask);
+
+       if (set == ENABLE) {
+               /* Setting the MsgVal and RxIE bits */
+               pch_can_bit_set(&priv->regs->if1_mcont, CAN_IF_MCONT_RXIE);
+               pch_can_bit_set(&priv->regs->if1_id2, CAN_ID_MSGVAL);
+
+       } else if (set == DISABLE) {
+               /* Resetting the MsgVal and RxIE bits */
+               pch_can_bit_clear(&priv->regs->if1_mcont, CAN_IF_MCONT_RXIE);
+               pch_can_bit_clear(&priv->regs->if1_id2, CAN_ID_MSGVAL);
+       }
+
+       pch_can_check_if_busy(&priv->regs->if1_creq, buff_num);
+       spin_unlock_irqrestore(&priv->msgif_reg_lock, flags);
+}
+
+static void pch_can_rx_enable_all(struct pch_can_priv *priv)
+{
+       int i;
+
+       /* Traverse the message objects configured as receivers. */
+       for (i = 0; i < PCH_OBJ_NUM; i++) {
+               if (priv->msg_obj[i] == MSG_OBJ_RX)
+                       pch_can_set_rx_enable(priv, i + 1, ENABLE);
+       }
+}
+
+static void pch_can_rx_disable_all(struct pch_can_priv *priv)
+{
+       int i;
+
+       /* Traverse the message objects configured as receivers. */
+       for (i = 0; i < PCH_OBJ_NUM; i++) {
+               if (priv->msg_obj[i] == MSG_OBJ_RX)
+                       pch_can_set_rx_enable(priv, i + 1, DISABLE);
+       }
+}
+
+static void pch_can_set_tx_enable(struct pch_can_priv *priv, u32 buff_num,
+                                u32 set)
+{
+       unsigned long flags;
+
+       spin_lock_irqsave(&priv->msgif_reg_lock, flags);
+       /* Reading the Msg buffer from Message RAM to Interface2 registers. */
+       iowrite32(CAN_CMASK_RX_TX_GET, &priv->regs->if2_cmask);
+       pch_can_check_if_busy(&priv->regs->if2_creq, buff_num);
+
+       /* Setting the IF2 CMASK register for accessing the
+        * MsgVal and TxIE bits. */
+       iowrite32(CAN_CMASK_RDWR | CAN_CMASK_ARB | CAN_CMASK_CTRL,
+                &priv->regs->if2_cmask);
+
+       if (set == ENABLE) {
+               /* Setting the MsgVal and TxIE bits */
+               pch_can_bit_set(&priv->regs->if2_mcont, CAN_IF_MCONT_TXIE);
+               pch_can_bit_set(&priv->regs->if2_id2, CAN_ID_MSGVAL);
+       } else if (set == DISABLE) {
+               /* Resetting the MsgVal and TxIE bits. */
+               pch_can_bit_clear(&priv->regs->if2_mcont, CAN_IF_MCONT_TXIE);
+               pch_can_bit_clear(&priv->regs->if2_id2, CAN_ID_MSGVAL);
+       }
+
+       pch_can_check_if_busy(&priv->regs->if2_creq, buff_num);
+       spin_unlock_irqrestore(&priv->msgif_reg_lock, flags);
+}
+
+static void pch_can_tx_enable_all(struct pch_can_priv *priv)
+{
+       int i;
+
+       /* Traverse the message objects configured as transmitters. */
+       for (i = 0; i < PCH_OBJ_NUM; i++) {
+               if (priv->msg_obj[i] == MSG_OBJ_TX)
+                       pch_can_set_tx_enable(priv, i + 1, ENABLE);
+       }
+}
+
+static void pch_can_tx_disable_all(struct pch_can_priv *priv)
+{
+       int i;
+
+       /* Traverse the message objects configured as transmitters. */
+       for (i = 0; i < PCH_OBJ_NUM; i++) {
+               if (priv->msg_obj[i] == MSG_OBJ_TX)
+                       pch_can_set_tx_enable(priv, i + 1, DISABLE);
+       }
+}
+
+static void pch_can_get_rx_enable(struct pch_can_priv *priv, u32 buff_num,
+                                u32 *enable)
+{
+       unsigned long flags;
+
+       spin_lock_irqsave(&priv->msgif_reg_lock, flags);
+       iowrite32(CAN_CMASK_RX_TX_GET, &priv->regs->if1_cmask);
+       pch_can_check_if_busy(&priv->regs->if1_creq, buff_num);
+
+       if (((ioread32(&priv->regs->if1_id2)) & CAN_ID_MSGVAL) &&
+                       ((ioread32(&priv->regs->if1_mcont)) &
+                       CAN_IF_MCONT_RXIE))
+               *enable = ENABLE;
+       else
+               *enable = DISABLE;
+       spin_unlock_irqrestore(&priv->msgif_reg_lock, flags);
+}
+
+static void pch_can_get_tx_enable(struct pch_can_priv *priv, u32 buff_num,
+                                u32 *enable)
+{
+       unsigned long flags;
+
+       spin_lock_irqsave(&priv->msgif_reg_lock, flags);
+       iowrite32(CAN_CMASK_RX_TX_GET, &priv->regs->if2_cmask);
+       pch_can_check_if_busy(&priv->regs->if2_creq, buff_num);
+
+       if (((ioread32(&priv->regs->if2_id2)) & CAN_ID_MSGVAL) &&
+                       ((ioread32(&priv->regs->if2_mcont)) &
+                       CAN_IF_MCONT_TXIE)) {
+               *enable = ENABLE;
+       } else {
+               *enable = DISABLE;
+       }
+       spin_unlock_irqrestore(&priv->msgif_reg_lock, flags);
+}
+
+static int pch_can_int_pending(struct pch_can_priv *priv)
+{
+       return ioread32(&priv->regs->intr) & 0xffff;
+}
+
+static void pch_can_set_rx_buffer_link(struct pch_can_priv *priv,
+                                      u32 buffer_num, u32 set)
+{
+       unsigned long flags;
+
+       spin_lock_irqsave(&priv->msgif_reg_lock, flags);
+       iowrite32(CAN_CMASK_RX_TX_GET, &priv->regs->if1_cmask);
+       pch_can_check_if_busy(&priv->regs->if1_creq, buffer_num);
+       iowrite32(CAN_CMASK_RDWR | CAN_CMASK_CTRL, &priv->regs->if1_cmask);
+       if (set == ENABLE)
+               pch_can_bit_clear(&priv->regs->if1_mcont, CAN_IF_MCONT_EOB);
+       else
+               pch_can_bit_set(&priv->regs->if1_mcont, CAN_IF_MCONT_EOB);
+
+       pch_can_check_if_busy(&priv->regs->if1_creq, buffer_num);
+       spin_unlock_irqrestore(&priv->msgif_reg_lock, flags);
+}
+
+static void pch_can_get_rx_buffer_link(struct pch_can_priv *priv,
+                                      u32 buffer_num, u32 *link)
+{
+       unsigned long flags;
+
+       spin_lock_irqsave(&priv->msgif_reg_lock, flags);
+       iowrite32(CAN_CMASK_RX_TX_GET, &priv->regs->if1_cmask);
+       pch_can_check_if_busy(&priv->regs->if1_creq, buffer_num);
+
+       if (ioread32(&priv->regs->if1_mcont) & CAN_IF_MCONT_EOB)
+               *link = DISABLE;
+       else
+               *link = ENABLE;
+       spin_unlock_irqrestore(&priv->msgif_reg_lock, flags);
+}
+
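+/*
+ * Reset every message object: clear its masks, IDs, control and data
+ * words, using interface 1 for the Rx objects and interface 2 for the
+ * remaining (Tx) objects.
+ */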
+static void pch_can_clear_buffers(struct pch_can_priv *priv)
+{
+       int i;
+
+       for (i = 0; i < PCH_RX_OBJ_NUM; i++) {
+               iowrite32(CAN_CMASK_RX_TX_SET, &priv->regs->if1_cmask);
+               iowrite32(0xffff, &priv->regs->if1_mask1);
+               iowrite32(0xffff, &priv->regs->if1_mask2);
+               iowrite32(0x0, &priv->regs->if1_id1);
+               iowrite32(0x0, &priv->regs->if1_id2);
+               iowrite32(0x0, &priv->regs->if1_mcont);
+               iowrite32(0x0, &priv->regs->if1_dataa1);
+               iowrite32(0x0, &priv->regs->if1_dataa2);
+               iowrite32(0x0, &priv->regs->if1_datab1);
+               iowrite32(0x0, &priv->regs->if1_datab2);
+               iowrite32(CAN_CMASK_RDWR | CAN_CMASK_MASK |
+                         CAN_CMASK_ARB | CAN_CMASK_CTRL,
+                         &priv->regs->if1_cmask);
+               pch_can_check_if_busy(&priv->regs->if1_creq, i+1);
+       }
+
+       for (; i < PCH_OBJ_NUM; i++) {
+               iowrite32(CAN_CMASK_RX_TX_SET, &priv->regs->if2_cmask);
+               iowrite32(0xffff, &priv->regs->if2_mask1);
+               iowrite32(0xffff, &priv->regs->if2_mask2);
+               iowrite32(0x0, &priv->regs->if2_id1);
+               iowrite32(0x0, &priv->regs->if2_id2);
+               iowrite32(0x0, &priv->regs->if2_mcont);
+               iowrite32(0x0, &priv->regs->if2_dataa1);
+               iowrite32(0x0, &priv->regs->if2_dataa2);
+               iowrite32(0x0, &priv->regs->if2_datab1);
+               iowrite32(0x0, &priv->regs->if2_datab2);
+               iowrite32(CAN_CMASK_RDWR | CAN_CMASK_MASK |
+                         CAN_CMASK_ARB | CAN_CMASK_CTRL,
+                         &priv->regs->if2_cmask);
+               pch_can_check_if_busy(&priv->regs->if2_creq, i+1);
+       }
+}
+
+static void pch_can_config_rx_tx_buffers(struct pch_can_priv *priv)
+{
+       int i;
+       unsigned long flags;
+
+       spin_lock_irqsave(&priv->msgif_reg_lock, flags);
+
+       for (i = 0; i < PCH_OBJ_NUM; i++) {
+               if (priv->msg_obj[i] == MSG_OBJ_RX) {
+                       iowrite32(CAN_CMASK_RX_TX_GET,
+                               &priv->regs->if1_cmask);
+                       pch_can_check_if_busy(&priv->regs->if1_creq, i+1);
+
+                       iowrite32(0x0, &priv->regs->if1_id1);
+                       iowrite32(0x0, &priv->regs->if1_id2);
+
+                       pch_can_bit_set(&priv->regs->if1_mcont,
+                                       CAN_IF_MCONT_UMASK);
+
+                       /* Clear the EOB bit (FIFO mode) for all but the last Rx Obj */
+                       pch_can_bit_clear(&priv->regs->if1_mcont,
+                                         CAN_IF_MCONT_EOB);
+                       /* In FIFO mode, the EOB bit of the last Rx Obj must be set */
+                       if (i == (PCH_RX_OBJ_NUM - 1))
+                               pch_can_bit_set(&priv->regs->if1_mcont,
+                                                 CAN_IF_MCONT_EOB);
+
+                       iowrite32(0, &priv->regs->if1_mask1);
+                       pch_can_bit_clear(&priv->regs->if1_mask2,
+                                         0x1fff | CAN_MASK2_MDIR_MXTD);
+
+                       /* Setting CMASK for writing */
+                       iowrite32(CAN_CMASK_RDWR | CAN_CMASK_MASK |
+                                 CAN_CMASK_ARB | CAN_CMASK_CTRL,
+                                 &priv->regs->if1_cmask);
+
+                       pch_can_check_if_busy(&priv->regs->if1_creq, i+1);
+               } else if (priv->msg_obj[i] == MSG_OBJ_TX) {
+                       iowrite32(CAN_CMASK_RX_TX_GET,
+                               &priv->regs->if2_cmask);
+                       pch_can_check_if_busy(&priv->regs->if2_creq, i+1);
+
+                       /* Reset the ID registers, then set the DIR bit for transmission */
+                       iowrite32(0x0, &priv->regs->if2_id1);
+                       iowrite32(0x0, &priv->regs->if2_id2);
+                       pch_can_bit_set(&priv->regs->if2_id2, CAN_ID2_DIR);
+
+                       /* Setting EOB bit for transmitter */
+                       iowrite32(CAN_IF_MCONT_EOB, &priv->regs->if2_mcont);
+
+                       pch_can_bit_set(&priv->regs->if2_mcont,
+                                       CAN_IF_MCONT_UMASK);
+
+                       iowrite32(0, &priv->regs->if2_mask1);
+                       pch_can_bit_clear(&priv->regs->if2_mask2, 0x1fff);
+
+                       /* Setting CMASK for writing */
+                       iowrite32(CAN_CMASK_RDWR | CAN_CMASK_MASK |
+                                 CAN_CMASK_ARB | CAN_CMASK_CTRL,
+                                 &priv->regs->if2_cmask);
+
+                       pch_can_check_if_busy(&priv->regs->if2_creq, i+1);
+               }
+       }
+       spin_unlock_irqrestore(&priv->msgif_reg_lock, flags);
+}
+
+static void pch_can_init(struct pch_can_priv *priv)
+{
+       /* Stopping the CAN device. */
+       pch_can_set_run_mode(priv, PCH_CAN_STOP);
+
+       /* Clearing all the message object buffers. */
+       pch_can_clear_buffers(priv);
+
+       /* Configuring each message object as either an Rx or a Tx object. */
+       pch_can_config_rx_tx_buffers(priv);
+
+       /* Enabling the interrupts. */
+       pch_can_set_int_enables(priv, PCH_CAN_ALL);
+}
+
+static void pch_can_release(struct pch_can_priv *priv)
+{
+       /* Stopping the CAN device. */
+       pch_can_set_run_mode(priv, PCH_CAN_STOP);
+
+       /* Disabling the interrupts. */
+       pch_can_set_int_enables(priv, PCH_CAN_NONE);
+
+       /* Disabling all the receive objects. */
+       pch_can_rx_disable_all(priv);
+
+       /* Disabling all the transmit objects. */
+       pch_can_tx_disable_all(priv);
+}
+
+/* This function clears interrupt(s) from the CAN device. */
+static void pch_can_int_clr(struct pch_can_priv *priv, u32 mask)
+{
+       if (mask == CAN_STATUS_INT) {
+               ioread32(&priv->regs->stat);
+               return;
+       }
+
+       /* Clear interrupt for transmit object */
+       if (priv->msg_obj[mask - 1] == MSG_OBJ_TX) {
+               /* Setting CMASK for clearing interrupts for frame transmission. */
+               iowrite32(CAN_CMASK_RDWR | CAN_CMASK_CTRL | CAN_CMASK_ARB,
+                         &priv->regs->if2_cmask);
+
+               /* Resetting the ID registers. */
+               pch_can_bit_set(&priv->regs->if2_id2,
+                              CAN_ID2_DIR | (0x7ff << 2));
+               iowrite32(0x0, &priv->regs->if2_id1);
+
+               /* Clearing NewDat, TxRqst & IntPnd */
+               pch_can_bit_clear(&priv->regs->if2_mcont,
+                                 CAN_IF_MCONT_NEWDAT | CAN_IF_MCONT_INTPND |
+                                 CAN_IF_MCONT_TXRQXT);
+               pch_can_check_if_busy(&priv->regs->if2_creq, mask);
+       } else if (priv->msg_obj[mask - 1] == MSG_OBJ_RX) {
+               /* Setting CMASK for clearing the reception interrupts. */
+               iowrite32(CAN_CMASK_RDWR | CAN_CMASK_CTRL | CAN_CMASK_ARB,
+                         &priv->regs->if1_cmask);
+
+               /* Clearing the Dir bit. */
+               pch_can_bit_clear(&priv->regs->if1_id2, CAN_ID2_DIR);
+
+               /* Clearing NewDat & IntPnd */
+               pch_can_bit_clear(&priv->regs->if1_mcont,
+                                 CAN_IF_MCONT_NEWDAT | CAN_IF_MCONT_INTPND);
+
+               pch_can_check_if_busy(&priv->regs->if1_creq, mask);
+       }
+}
+
+static int pch_can_get_buffer_status(struct pch_can_priv *priv)
+{
+       return (ioread32(&priv->regs->treq1) & 0xffff) |
+              ((ioread32(&priv->regs->treq2) & 0xffff) << 16);
+}
+
+static void pch_can_reset(struct pch_can_priv *priv)
+{
+       /* write to sw reset register */
+       iowrite32(1, &priv->regs->srst);
+       iowrite32(0, &priv->regs->srst);
+}
+
+static void pch_can_error(struct net_device *ndev, u32 status)
+{
+       struct sk_buff *skb;
+       struct pch_can_priv *priv = netdev_priv(ndev);
+       struct can_frame *cf;
+       u32 errc;
+       struct net_device_stats *stats = &(priv->ndev->stats);
+       enum can_state state = priv->can.state;
+
+       skb = alloc_can_err_skb(ndev, &cf);
+       if (!skb)
+               return;
+
+       if (status & PCH_BUS_OFF) {
+               pch_can_tx_disable_all(priv);
+               pch_can_rx_disable_all(priv);
+               state = CAN_STATE_BUS_OFF;
+               cf->can_id |= CAN_ERR_BUSOFF;
+               can_bus_off(ndev);
+               pch_can_set_run_mode(priv, PCH_CAN_RUN);
+               dev_err(&ndev->dev, "%s -> Bus Off occurred.\n", __func__);
+       }
+
+       /* Warning interrupt. */
+       if (status & PCH_EWARN) {
+               state = CAN_STATE_ERROR_WARNING;
+               priv->can.can_stats.error_warning++;
+               cf->can_id |= CAN_ERR_CRTL;
+               errc = ioread32(&priv->regs->errc);
+               if (((errc & CAN_REC) >> 8) > 96)
+                       cf->data[1] |= CAN_ERR_CRTL_RX_WARNING;
+               if ((errc & CAN_TEC) > 96)
+                       cf->data[1] |= CAN_ERR_CRTL_TX_WARNING;
+               dev_warn(&ndev->dev,
+                       "%s -> Error Counter is more than 96.\n", __func__);
+       }
+       /* Error passive interrupt. */
+       if (status & PCH_EPASSIV) {
+               priv->can.can_stats.error_passive++;
+               state = CAN_STATE_ERROR_PASSIVE;
+               cf->can_id |= CAN_ERR_CRTL;
+               errc = ioread32(&priv->regs->errc);
+               if (((errc & CAN_REC) >> 8) > 127)
+                       cf->data[1] |= CAN_ERR_CRTL_RX_PASSIVE;
+               if ((errc & CAN_TEC) > 127)
+                       cf->data[1] |= CAN_ERR_CRTL_TX_PASSIVE;
+               dev_err(&ndev->dev,
+                       "%s -> CAN controller is ERROR PASSIVE.\n", __func__);
+       }
+
+       if (status & PCH_LEC_ALL) {
+               priv->can.can_stats.bus_error++;
+               stats->rx_errors++;
+               switch (status & PCH_LEC_ALL) {
+               case PCH_STUF_ERR:
+                       cf->data[2] |= CAN_ERR_PROT_STUFF;
+                       break;
+               case PCH_FORM_ERR:
+                       cf->data[2] |= CAN_ERR_PROT_FORM;
+                       break;
+               case PCH_ACK_ERR:
+                       cf->data[2] |= CAN_ERR_PROT_LOC_ACK |
+                                      CAN_ERR_PROT_LOC_ACK_DEL;
+                       break;
+               case PCH_BIT1_ERR:
+               case PCH_BIT0_ERR:
+                       cf->data[2] |= CAN_ERR_PROT_BIT;
+                       break;
+               case PCH_CRC_ERR:
+                       cf->data[2] |= CAN_ERR_PROT_LOC_CRC_SEQ |
+                                      CAN_ERR_PROT_LOC_CRC_DEL;
+                       break;
+               default:
+                       iowrite32(status | PCH_LEC_ALL, &priv->regs->stat);
+                       break;
+               }
+
+       }
+
+       priv->can.state = state;
+       netif_rx(skb);
+
+       stats->rx_packets++;
+       stats->rx_bytes += cf->can_dlc;
+}
+
+static irqreturn_t pch_can_interrupt(int irq, void *dev_id)
+{
+       struct net_device *ndev = (struct net_device *)dev_id;
+       struct pch_can_priv *priv = netdev_priv(ndev);
+
+       pch_can_set_int_enables(priv, PCH_CAN_NONE);
+
+       napi_schedule(&priv->napi);
+
+       return IRQ_HANDLED;
+}
+
+static int pch_can_rx_normal(struct net_device *ndev, u32 int_stat)
+{
+       u32 reg;
+       canid_t id;
+       u32 ide;
+       u32 rtr;
+       int i, j, k;
+       int rcv_pkts = 0;
+       struct sk_buff *skb;
+       struct can_frame *cf;
+       struct pch_can_priv *priv = netdev_priv(ndev);
+       struct net_device_stats *stats = &(priv->ndev->stats);
+
+       /* Reading the message object from the Message RAM */
+       iowrite32(CAN_CMASK_RX_TX_GET, &priv->regs->if1_cmask);
+       pch_can_check_if_busy(&priv->regs->if1_creq, int_stat);
+
+       /* Reading the MCONT register. */
+       reg = ioread32(&priv->regs->if1_mcont);
+       reg &= 0xffff;
+
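+       /* Walk the Rx FIFO starting at the interrupting object until one with the EOB (end-of-buffer) bit set is found. */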
+       for (k = int_stat; !(reg & CAN_IF_MCONT_EOB); k++) {
+               /* If MsgLost bit set. */
+               if (reg & CAN_IF_MCONT_MSGLOST) {
+                       dev_err(&priv->ndev->dev, "Msg Obj is overwritten.\n");
+                       pch_can_bit_clear(&priv->regs->if1_mcont,
+                                         CAN_IF_MCONT_MSGLOST);
+                       iowrite32(CAN_CMASK_RDWR | CAN_CMASK_CTRL,
+                                 &priv->regs->if1_cmask);
+                       pch_can_check_if_busy(&priv->regs->if1_creq, k);
+
+                       skb = alloc_can_err_skb(ndev, &cf);
+                       if (!skb)
+                               return -ENOMEM;
+
+                       priv->can.can_stats.error_passive++;
+                       priv->can.state = CAN_STATE_ERROR_PASSIVE;
+                       cf->can_id |= CAN_ERR_CRTL;
+                       cf->data[1] |= CAN_ERR_CRTL_RX_OVERFLOW;
+                       cf->data[2] |= CAN_ERR_PROT_OVERLOAD;
+                       stats->rx_packets++;
+                       stats->rx_bytes += cf->can_dlc;
+
+                       netif_receive_skb(skb);
+                       rcv_pkts++;
+                       goto RX_NEXT;
+               }
+               if (!(reg & CAN_IF_MCONT_NEWDAT))
+                       goto RX_NEXT;
+
+               skb = alloc_can_skb(priv->ndev, &cf);
+               if (!skb)
+                       return -ENOMEM;
+
+               /* Get Received data */
+               ide = ((ioread32(&priv->regs->if1_id2)) & CAN_ID2_XTD) >> 14;
+               if (ide) {
+                       id = (ioread32(&priv->regs->if1_id1) & 0xffff);
+                       id |= (((ioread32(&priv->regs->if1_id2)) &
+                                           0x1fff) << 16);
+                       cf->can_id = (id & CAN_EFF_MASK) | CAN_EFF_FLAG;
+               } else {
+                       id = (((ioread32(&priv->regs->if1_id2)) &
+                                         (CAN_SFF_MASK << 2)) >> 2);
+                       cf->can_id = (id & CAN_SFF_MASK);
+               }
+
+               rtr = (ioread32(&priv->regs->if1_id2) &  CAN_ID2_DIR);
+               if (rtr) {
+                       cf->can_dlc = 0;
+                       cf->can_id |= CAN_RTR_FLAG;
+               } else {
+                       cf->can_dlc = ((ioread32(&priv->regs->if1_mcont)) &
+                                                  0x0f);
+               }
+
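+               /* Pull the payload out of the data registers, two bytes from each 32-bit read. */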
+               for (i = 0, j = 0; i < cf->can_dlc; j++) {
+                       reg = ioread32(&priv->regs->if1_dataa1 + j*4);
+                       cf->data[i++] = cpu_to_le32(reg & 0xff);
+                       if (i == cf->can_dlc)
+                               break;
+                       cf->data[i++] = cpu_to_le32((reg >> 8) & 0xff);
+               }
+
+               netif_receive_skb(skb);
+               rcv_pkts++;
+               stats->rx_packets++;
+               stats->rx_bytes += cf->can_dlc;
+
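+               /* Pending interrupts are cleared per object below the FIFO threshold, in one batch once the threshold object is reached, and via pch_can_int_clr() above it. */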
+               if (k < PCH_FIFO_THRESH) {
+                       iowrite32(CAN_CMASK_RDWR | CAN_CMASK_CTRL |
+                                 CAN_CMASK_ARB, &priv->regs->if1_cmask);
+
+                       /* Clearing the Dir bit. */
+                       pch_can_bit_clear(&priv->regs->if1_id2, CAN_ID2_DIR);
+
+                       /* Clearing NewDat & IntPnd */
+                       pch_can_bit_clear(&priv->regs->if1_mcont,
+                                         CAN_IF_MCONT_INTPND);
+                       pch_can_check_if_busy(&priv->regs->if1_creq, k);
+               } else if (k > PCH_FIFO_THRESH) {
+                       pch_can_int_clr(priv, k);
+               } else if (k == PCH_FIFO_THRESH) {
+                       int cnt;
+                       for (cnt = 0; cnt < PCH_FIFO_THRESH; cnt++)
+                               pch_can_int_clr(priv, cnt+1);
+               }
+RX_NEXT:
+               /* Reading the message object from the Message RAM */
+               iowrite32(CAN_CMASK_RX_TX_GET, &priv->regs->if1_cmask);
+               pch_can_check_if_busy(&priv->regs->if1_creq, k + 1);
+               reg = ioread32(&priv->regs->if1_mcont);
+       }
+
+       return rcv_pkts;
+}
+static int pch_can_rx_poll(struct napi_struct *napi, int quota)
+{
+       struct net_device *ndev = napi->dev;
+       struct pch_can_priv *priv = netdev_priv(ndev);
+       struct net_device_stats *stats = &(priv->ndev->stats);
+       u32 dlc;
+       u32 int_stat;
+       int rcv_pkts = 0;
+       u32 reg_stat;
+       unsigned long flags;
+
+       int_stat = pch_can_int_pending(priv);
+       if (!int_stat)
+               return 0;
+
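+       /* Status interrupts are serviced first, then message object interrupts, looping until nothing is left pending. */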
+INT_STAT:
+       if (int_stat == CAN_STATUS_INT) {
+               reg_stat = ioread32(&priv->regs->stat);
+               if (reg_stat & (PCH_BUS_OFF | PCH_LEC_ALL)) {
+                       if ((reg_stat & PCH_LEC_ALL) != PCH_LEC_ALL)
+                               pch_can_error(ndev, reg_stat);
+               }
+
+               if (reg_stat & PCH_TX_OK) {
+                       spin_lock_irqsave(&priv->msgif_reg_lock, flags);
+                       iowrite32(CAN_CMASK_RX_TX_GET, &priv->regs->if2_cmask);
+                       pch_can_check_if_busy(&priv->regs->if2_creq,
+                                              ioread32(&priv->regs->intr));
+                       spin_unlock_irqrestore(&priv->msgif_reg_lock, flags);
+                       pch_can_bit_clear(&priv->regs->stat, PCH_TX_OK);
+               }
+
+               if (reg_stat & PCH_RX_OK)
+                       pch_can_bit_clear(&priv->regs->stat, PCH_RX_OK);
+
+               int_stat = pch_can_int_pending(priv);
+               if (int_stat == CAN_STATUS_INT)
+                       goto INT_STAT;
+       }
+
+MSG_OBJ:
+       if ((int_stat >= 1) && (int_stat <= PCH_RX_OBJ_NUM)) {
+               spin_lock_irqsave(&priv->msgif_reg_lock, flags);
+               rcv_pkts = pch_can_rx_normal(ndev, int_stat);
+               spin_unlock_irqrestore(&priv->msgif_reg_lock, flags);
+               if (rcv_pkts < 0)
+                       return 0;
+       } else if ((int_stat > PCH_RX_OBJ_NUM) && (int_stat <= PCH_OBJ_NUM)) {
+               if (priv->msg_obj[int_stat - 1] == MSG_OBJ_TX) {
+                       /* Handle transmission interrupt */
+                       can_get_echo_skb(ndev, int_stat - PCH_RX_OBJ_NUM - 1);
+                       spin_lock_irqsave(&priv->msgif_reg_lock, flags);
+                       iowrite32(CAN_CMASK_RX_TX_GET | CAN_CMASK_CLRINTPND,
+                                 &priv->regs->if2_cmask);
+                       dlc = ioread32(&priv->regs->if2_mcont) &
+                                      CAN_IF_MCONT_DLC;
+                       pch_can_check_if_busy(&priv->regs->if2_creq, int_stat);
+                       spin_unlock_irqrestore(&priv->msgif_reg_lock, flags);
+                       if (dlc > 8)
+                               dlc = 8;
+                       stats->tx_bytes += dlc;
+                       stats->tx_packets++;
+               }
+       }
+
+       int_stat = pch_can_int_pending(priv);
+       if (int_stat == CAN_STATUS_INT)
+               goto INT_STAT;
+       else if (int_stat >= 1 && int_stat <= 32)
+               goto MSG_OBJ;
+
+       napi_complete(napi);
+       pch_can_set_int_enables(priv, PCH_CAN_ALL);
+
+       return rcv_pkts;
+}
+
+static int pch_set_bittiming(struct net_device *ndev)
+{
+       struct pch_can_priv *priv = netdev_priv(ndev);
+       const struct can_bittiming *bt = &priv->can.bittiming;
+       u32 canbit;
+       u32 bepe;
+       u32 brp;
+
+       /* Setting the CCE bit for accessing the CAN timing register. */
+       pch_can_bit_set(&priv->regs->cont, CAN_CTRL_CCE);
+
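+       /* bt->tq is in ns; dividing by the CAN clock period (1000000000 / PCH_CAN_CLK ns) gives the prescaler, which is stored minus one. */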
+       brp = (bt->tq) / (1000000000/PCH_CAN_CLK) - 1;
+       canbit = brp & MSK_BITT_BRP;
+       canbit |= (bt->sjw - 1) << BIT_BITT_SJW;
+       canbit |= (bt->phase_seg1 + bt->prop_seg - 1) << BIT_BITT_TSEG1;
+       canbit |= (bt->phase_seg2 - 1) << BIT_BITT_TSEG2;
+       bepe = (brp & MSK_BRPE_BRPE) >> BIT_BRPE_BRPE;
+       iowrite32(canbit, &priv->regs->bitt);
+       iowrite32(bepe, &priv->regs->brpe);
+       pch_can_bit_clear(&priv->regs->cont, CAN_CTRL_CCE);
+
+       return 0;
+}
+
+static void pch_can_start(struct net_device *ndev)
+{
+       struct pch_can_priv *priv = netdev_priv(ndev);
+
+       if (priv->can.state != CAN_STATE_STOPPED)
+               pch_can_reset(priv);
+
+       pch_set_bittiming(ndev);
+       pch_can_set_optmode(priv);
+
+       pch_can_tx_enable_all(priv);
+       pch_can_rx_enable_all(priv);
+
+       /* Setting the CAN to run mode. */
+       pch_can_set_run_mode(priv, PCH_CAN_RUN);
+
+       priv->can.state = CAN_STATE_ERROR_ACTIVE;
+
+       return;
+}
+
+static int pch_can_do_set_mode(struct net_device *ndev, enum can_mode mode)
+{
+       int ret = 0;
+
+       switch (mode) {
+       case CAN_MODE_START:
+               pch_can_start(ndev);
+               netif_wake_queue(ndev);
+               break;
+       default:
+               ret = -EOPNOTSUPP;
+               break;
+       }
+
+       return ret;
+}
+
+static int pch_can_open(struct net_device *ndev)
+{
+       struct pch_can_priv *priv = netdev_priv(ndev);
+       int retval;
+
+       retval = pci_enable_msi(priv->dev);
+       if (retval) {
+               dev_info(&ndev->dev, "PCH CAN opened without MSI\n");
+               priv->use_msi = 0;
+       } else {
+               dev_info(&ndev->dev, "PCH CAN opened with MSI\n");
+               priv->use_msi = 1;
+       }
+
+       /* Registering the interrupt handler. */
+       retval = request_irq(priv->dev->irq, pch_can_interrupt, IRQF_SHARED,
+                            ndev->name, ndev);
+       if (retval) {
+               dev_err(&ndev->dev, "request_irq failed.\n");
+               goto req_irq_err;
+       }
+
+       /* Open common can device */
+       retval = open_candev(ndev);
+       if (retval) {
+               dev_err(ndev->dev.parent, "open_candev() failed %d\n", retval);
+               goto err_open_candev;
+       }
+
+       pch_can_init(priv);
+       pch_can_start(ndev);
+       napi_enable(&priv->napi);
+       netif_start_queue(ndev);
+
+       return 0;
+
+err_open_candev:
+       free_irq(priv->dev->irq, ndev);
+req_irq_err:
+       if (priv->use_msi)
+               pci_disable_msi(priv->dev);
+
+       pch_can_release(priv);
+
+       return retval;
+}
+
+static int pch_close(struct net_device *ndev)
+{
+       struct pch_can_priv *priv = netdev_priv(ndev);
+
+       netif_stop_queue(ndev);
+       napi_disable(&priv->napi);
+       pch_can_release(priv);
+       free_irq(priv->dev->irq, ndev);
+       if (priv->use_msi)
+               pci_disable_msi(priv->dev);
+       close_candev(ndev);
+       priv->can.state = CAN_STATE_STOPPED;
+       return 0;
+}
+
+static int pch_get_msg_obj_sts(struct net_device *ndev, u32 obj_id)
+{
+       u32 buffer_status = 0;
+       struct pch_can_priv *priv = netdev_priv(ndev);
+
+       /* Getting the message object status. */
+       buffer_status = (u32) pch_can_get_buffer_status(priv);
+
+       return buffer_status & obj_id;
+}
+
+
+static netdev_tx_t pch_xmit(struct sk_buff *skb, struct net_device *ndev)
+{
+       int i, j;
+       unsigned long flags;
+       struct pch_can_priv *priv = netdev_priv(ndev);
+       struct can_frame *cf = (struct can_frame *)skb->data;
+       int tx_buffer_avail = 0;
+
+       if (can_dropped_invalid_skb(ndev, skb))
+               return NETDEV_TX_OK;
+
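+       /* priv->tx_obj walks the Tx message objects; once it has passed the last one, wait for all pending transmissions to finish before wrapping back to the first Tx object. */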
+       if (priv->tx_obj == (PCH_OBJ_NUM + 1)) { /* Point tail Obj */
+               while (pch_get_msg_obj_sts(ndev, (((1 << PCH_TX_OBJ_NUM)-1) <<
+                                          PCH_RX_OBJ_NUM)))
+                       udelay(500);
+
+               priv->tx_obj = PCH_RX_OBJ_NUM + 1; /* Point head of Tx Obj ID */
+               tx_buffer_avail = priv->tx_obj; /* Use the first Tx Obj */
+       } else {
+               tx_buffer_avail = priv->tx_obj;
+       }
+       priv->tx_obj++;
+
+       /* Acquiring the lock. */
+       spin_lock_irqsave(&priv->msgif_reg_lock, flags);
+
+       /* Reading the Msg Obj from the Msg RAM to the Interface register. */
+       iowrite32(CAN_CMASK_RX_TX_GET, &priv->regs->if2_cmask);
+       pch_can_check_if_busy(&priv->regs->if2_creq, tx_buffer_avail);
+
+       /* Setting the CMASK register. */
+       pch_can_bit_set(&priv->regs->if2_cmask, CAN_CMASK_ALL);
+
+       /* Program the frame ID, handling standard and extended formats. */
+       pch_can_bit_clear(&priv->regs->if2_id1, 0xffff);
+       pch_can_bit_clear(&priv->regs->if2_id2, 0x1fff | CAN_ID2_XTD);
+       if (cf->can_id & CAN_EFF_FLAG) {
+               pch_can_bit_set(&priv->regs->if2_id1, cf->can_id & 0xffff);
+               pch_can_bit_set(&priv->regs->if2_id2,
+                               ((cf->can_id >> 16) & 0x1fff) | CAN_ID2_XTD);
+       } else {
+               pch_can_bit_set(&priv->regs->if2_id1, 0);
+               pch_can_bit_set(&priv->regs->if2_id2,
+                               (cf->can_id & CAN_SFF_MASK) << 2);
+       }
+
+       /* Clear the DIR bit if a remote frame has to be transmitted. */
+       if (cf->can_id & CAN_RTR_FLAG)
+               pch_can_bit_clear(&priv->regs->if2_id2, CAN_ID2_DIR);
+
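+       /* The payload is written two bytes at a time: the even-indexed byte, then the odd-indexed byte shifted into bits 15:8 of the same data register. */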
+       for (i = 0, j = 0; i < cf->can_dlc; j++) {
+               iowrite32(le32_to_cpu(cf->data[i++]),
+                        (&priv->regs->if2_dataa1) + j*4);
+               if (i == cf->can_dlc)
+                       break;
+               iowrite32(le32_to_cpu(cf->data[i++] << 8),
+                        (&priv->regs->if2_dataa1) + j*4);
+       }
+
+       can_put_echo_skb(skb, ndev, tx_buffer_avail - PCH_RX_OBJ_NUM - 1);
+
+       /* Updating the size of the data. */
+       pch_can_bit_clear(&priv->regs->if2_mcont, 0x0f);
+       pch_can_bit_set(&priv->regs->if2_mcont, cf->can_dlc);
+
+       /* Clearing IntPend, NewDat & TxRqst */
+       pch_can_bit_clear(&priv->regs->if2_mcont,
+                         CAN_IF_MCONT_NEWDAT | CAN_IF_MCONT_INTPND |
+                         CAN_IF_MCONT_TXRQXT);
+
+       /* Setting NewDat, TxRqst bits */
+       pch_can_bit_set(&priv->regs->if2_mcont,
+                       CAN_IF_MCONT_NEWDAT | CAN_IF_MCONT_TXRQXT);
+
+       pch_can_check_if_busy(&priv->regs->if2_creq, tx_buffer_avail);
+
+       spin_unlock_irqrestore(&priv->msgif_reg_lock, flags);
+
+       return NETDEV_TX_OK;
+}
+
+static const struct net_device_ops pch_can_netdev_ops = {
+       .ndo_open               = pch_can_open,
+       .ndo_stop               = pch_close,
+       .ndo_start_xmit         = pch_xmit,
+};
+
+static void __devexit pch_can_remove(struct pci_dev *pdev)
+{
+       struct net_device *ndev = pci_get_drvdata(pdev);
+       struct pch_can_priv *priv = netdev_priv(ndev);
+
+       /* Reset the controller before unmapping; free the netdev (and priv) last. */
+       unregister_candev(priv->ndev);
+       pch_can_reset(priv);
+       pci_iounmap(pdev, priv->regs);
+       pci_release_regions(pdev);
+       pci_disable_device(pdev);
+       pci_set_drvdata(pdev, NULL);
+       free_candev(priv->ndev);
+}
+
+#ifdef CONFIG_PM
+static int pch_can_suspend(struct pci_dev *pdev, pm_message_t state)
+{
+       int i;                  /* Counter variable. */
+       int retval;             /* Return value. */
+       u32 buf_stat;   /* Variable for reading the transmit buffer status. */
+       u32 counter = 0xFFFFFF;
+
+       struct net_device *dev = pci_get_drvdata(pdev);
+       struct pch_can_priv *priv = netdev_priv(dev);
+
+       /* Stop the CAN controller */
+       pch_can_set_run_mode(priv, PCH_CAN_STOP);
+
+       /* Indicate that we are about to suspend */
+       priv->can.state = CAN_STATE_SLEEPING;
+
+       /* Waiting for all transmissions to complete. */
+       while (counter) {
+               buf_stat = pch_can_get_buffer_status(priv);
+               if (!buf_stat)
+                       break;
+               counter--;
+               udelay(1);
+       }
+       if (!counter)
+               dev_err(&pdev->dev, "%s -> Transmission timed out.\n", __func__);
+
+       /* Save interrupt configuration and then disable them */
+       pch_can_get_int_enables(priv, &(priv->int_enables));
+       pch_can_set_int_enables(priv, PCH_CAN_DISABLE);
+
+       /* Save Tx buffer enable state */
+       for (i = 0; i < PCH_OBJ_NUM; i++) {
+               if (priv->msg_obj[i] == MSG_OBJ_TX)
+                       pch_can_get_tx_enable(priv, i + 1,
+                                             &(priv->tx_enable[i]));
+       }
+
+       /* Disable all Transmit buffers */
+       pch_can_tx_disable_all(priv);
+
+       /* Save Rx buffer enable state */
+       for (i = 0; i < PCH_OBJ_NUM; i++) {
+               if (priv->msg_obj[i] == MSG_OBJ_RX) {
+                       pch_can_get_rx_enable(priv, i + 1,
+                                               &(priv->rx_enable[i]));
+                       pch_can_get_rx_buffer_link(priv, i + 1,
+                                               &(priv->rx_link[i]));
+               }
+       }
+
+       /* Disable all Receive buffers */
+       pch_can_rx_disable_all(priv);
+       retval = pci_save_state(pdev);
+       if (retval) {
+               dev_err(&pdev->dev, "pci_save_state failed.\n");
+       } else {
+               pci_enable_wake(pdev, PCI_D3hot, 0);
+               pci_disable_device(pdev);
+               pci_set_power_state(pdev, pci_choose_state(pdev, state));
+       }
+
+       return retval;
+}
+
+static int pch_can_resume(struct pci_dev *pdev)
+{
+       int i;                  /* Counter variable. */
+       int retval;             /* Return variable. */
+       struct net_device *dev = pci_get_drvdata(pdev);
+       struct pch_can_priv *priv = netdev_priv(dev);
+
+       pci_set_power_state(pdev, PCI_D0);
+       pci_restore_state(pdev);
+       retval = pci_enable_device(pdev);
+       if (retval) {
+               dev_err(&pdev->dev, "pci_enable_device failed.\n");
+               return retval;
+       }
+
+       pci_enable_wake(pdev, PCI_D3hot, 0);
+
+       priv->can.state = CAN_STATE_ERROR_ACTIVE;
+
+       /* Disabling all interrupts. */
+       pch_can_set_int_enables(priv, PCH_CAN_DISABLE);
+
+       /* Setting the CAN device in Stop Mode. */
+       pch_can_set_run_mode(priv, PCH_CAN_STOP);
+
+       /* Configuring the transmit and receive buffers. */
+       pch_can_config_rx_tx_buffers(priv);
+
+       /* Restore the CAN state */
+       pch_set_bittiming(dev);
+
+       /* Listen/Active */
+       pch_can_set_optmode(priv);
+
+       /* Enabling the transmit buffer. */
+       for (i = 0; i < PCH_OBJ_NUM; i++) {
+               if (priv->msg_obj[i] == MSG_OBJ_TX) {
+                       pch_can_set_tx_enable(priv, i + 1,
+                                             priv->tx_enable[i]);
+               }
+       }
+
+       /* Configuring the receive buffer and enabling them. */
+       for (i = 0; i < PCH_OBJ_NUM; i++) {
+               if (priv->msg_obj[i] == MSG_OBJ_RX) {
+                       /* Restore buffer link */
+                       pch_can_set_rx_buffer_link(priv, i + 1,
+                                                  priv->rx_link[i]);
+
+                       /* Restore buffer enables */
+                       pch_can_set_rx_enable(priv, i + 1, priv->rx_enable[i]);
+               }
+       }
+
+       /* Enable CAN Interrupts */
+       pch_can_set_int_custom(priv);
+
+       /* Restore Run Mode */
+       pch_can_set_run_mode(priv, PCH_CAN_RUN);
+
+       return retval;
+}
+#else
+#define pch_can_suspend NULL
+#define pch_can_resume NULL
+#endif
+
+static int pch_can_get_berr_counter(const struct net_device *dev,
+                                   struct can_berr_counter *bec)
+{
+       struct pch_can_priv *priv = netdev_priv(dev);
+
+       bec->txerr = ioread32(&priv->regs->errc) & CAN_TEC;
+       bec->rxerr = (ioread32(&priv->regs->errc) & CAN_REC) >> 8;
+
+       return 0;
+}
+
+static int __devinit pch_can_probe(struct pci_dev *pdev,
+                                  const struct pci_device_id *id)
+{
+       struct net_device *ndev;
+       struct pch_can_priv *priv;
+       int rc;
+       int index;
+       void __iomem *addr;
+
+       rc = pci_enable_device(pdev);
+       if (rc) {
+               dev_err(&pdev->dev, "Failed pci_enable_device %d\n", rc);
+               goto probe_exit_endev;
+       }
+
+       rc = pci_request_regions(pdev, KBUILD_MODNAME);
+       if (rc) {
+               dev_err(&pdev->dev, "Failed pci_request_regions %d\n", rc);
+               goto probe_exit_pcireq;
+       }
+
+       addr = pci_iomap(pdev, 1, 0);
+       if (!addr) {
+               rc = -EIO;
+               dev_err(&pdev->dev, "Failed pci_iomap\n");
+               goto probe_exit_ipmap;
+       }
+
+       ndev = alloc_candev(sizeof(struct pch_can_priv), PCH_TX_OBJ_NUM);
+       if (!ndev) {
+               rc = -ENOMEM;
+               dev_err(&pdev->dev, "Failed alloc_candev\n");
+               goto probe_exit_alloc_candev;
+       }
+
+       priv = netdev_priv(ndev);
+       priv->ndev = ndev;
+       priv->regs = addr;
+       priv->dev = pdev;
+       priv->can.bittiming_const = &pch_can_bittiming_const;
+       priv->can.do_set_mode = pch_can_do_set_mode;
+       priv->can.do_get_berr_counter = pch_can_get_berr_counter;
+       priv->can.ctrlmode_supported = CAN_CTRLMODE_LISTENONLY |
+                                      CAN_CTRLMODE_LOOPBACK;
+       priv->tx_obj = PCH_RX_OBJ_NUM + 1; /* Point head of Tx Obj */
+
+       ndev->irq = pdev->irq;
+       ndev->flags |= IFF_ECHO;
+
+       pci_set_drvdata(pdev, ndev);
+       SET_NETDEV_DEV(ndev, &pdev->dev);
+       ndev->netdev_ops = &pch_can_netdev_ops;
+
+       priv->can.clock.freq = PCH_CAN_CLK; /* Hz */
+       for (index = 0; index < PCH_RX_OBJ_NUM;)
+               priv->msg_obj[index++] = MSG_OBJ_RX;
+
+       for (; index < PCH_OBJ_NUM;)
+               priv->msg_obj[index++] = MSG_OBJ_TX;
+
+       netif_napi_add(ndev, &priv->napi, pch_can_rx_poll, PCH_RX_OBJ_NUM);
+
+       rc = register_candev(ndev);
+       if (rc) {
+               dev_err(&pdev->dev, "Failed register_candev %d\n", rc);
+               goto probe_exit_reg_candev;
+       }
+
+       return 0;
+
+probe_exit_reg_candev:
+       free_candev(ndev);
+probe_exit_alloc_candev:
+       pci_iounmap(pdev, addr);
+probe_exit_ipmap:
+       pci_release_regions(pdev);
+probe_exit_pcireq:
+       pci_disable_device(pdev);
+probe_exit_endev:
+       return rc;
+}
+
+static struct pci_driver pch_can_pcidev = {
+       .name = "pch_can",
+       .id_table = pch_pci_tbl,
+       .probe = pch_can_probe,
+       .remove = __devexit_p(pch_can_remove),
+       .suspend = pch_can_suspend,
+       .resume = pch_can_resume,
+};
+
+static int __init pch_can_pci_init(void)
+{
+       return pci_register_driver(&pch_can_pcidev);
+}
+module_init(pch_can_pci_init);
+
+static void __exit pch_can_pci_exit(void)
+{
+       pci_unregister_driver(&pch_can_pcidev);
+}
+module_exit(pch_can_pci_exit);
+
+MODULE_DESCRIPTION("Controller Area Network Driver");
+MODULE_LICENSE("GPL v2");
+MODULE_VERSION("0.94");
index ae3505a..6fdc031 100644 (file)
@@ -58,4 +58,16 @@ config CAN_PLX_PCI
           - esd CAN-PCIe/2000
           - Marathon CAN-bus-PCI card (http://www.marathon.ru/)
           - TEWS TECHNOLOGIES TPMC810 card (http://www.tews.com/)
+
+config CAN_TSCAN1
+       tristate "TS-CAN1 PC104 boards"
+       depends on ISA
+       help
+       This driver is for Technologic Systems' TS-CAN1 PC104 boards.
+       http://www.embeddedarm.com/products/board-detail.php?product=TS-CAN1
+       The driver supports multiple boards and automatically configures them:
+       PLD IO base addresses are read from jumpers JP1 and JP2,
+       IRQ numbers are read from jumpers JP4 and JP5,
+       SJA1000 IO base addresses are chosen heuristically (first that works).
+
 endif
index ce92455..2c591eb 100644 (file)
@@ -9,5 +9,6 @@ obj-$(CONFIG_CAN_SJA1000_OF_PLATFORM) += sja1000_of_platform.o
 obj-$(CONFIG_CAN_EMS_PCI) += ems_pci.o
 obj-$(CONFIG_CAN_KVASER_PCI) += kvaser_pci.o
 obj-$(CONFIG_CAN_PLX_PCI) += plx_pci.o
+obj-$(CONFIG_CAN_TSCAN1) += tscan1.o
 
 ccflags-$(CONFIG_CAN_DEBUG_DEVICES) := -DDEBUG
diff --git a/drivers/net/can/sja1000/tscan1.c b/drivers/net/can/sja1000/tscan1.c
new file mode 100644 (file)
index 0000000..9756099
--- /dev/null
@@ -0,0 +1,216 @@
+/*
+ * tscan1.c: driver for Technologic Systems TS-CAN1 PC104 boards
+ *
+ * Copyright 2010 Andre B. Oliveira
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program.  If not, see <http://www.gnu.org/licenses/>.
+ */
+
+/*
+ * References:
+ * - Getting started with TS-CAN1, Technologic Systems, Jun 2009
+ *     http://www.embeddedarm.com/documentation/ts-can1-manual.pdf
+ */
+
+#include <linux/init.h>
+#include <linux/io.h>
+#include <linux/ioport.h>
+#include <linux/isa.h>
+#include <linux/module.h>
+#include <linux/netdevice.h>
+#include "sja1000.h"
+
+MODULE_DESCRIPTION("Driver for Technologic Systems TS-CAN1 PC104 boards");
+MODULE_AUTHOR("Andre B. Oliveira <anbadeol@gmail.com>");
+MODULE_LICENSE("GPL");
+
+/* Maximum number of boards (one in each JP1:JP2 setting of IO address) */
+#define TSCAN1_MAXDEV 4
+
+/* PLD registers address offsets */
+#define TSCAN1_ID1     0
+#define TSCAN1_ID2     1
+#define TSCAN1_VERSION 2
+#define TSCAN1_LED     3
+#define TSCAN1_PAGE    4
+#define TSCAN1_MODE    5
+#define TSCAN1_JUMPERS 6
+
+/* PLD board identifier registers magic values */
+#define TSCAN1_ID1_VALUE 0xf6
+#define TSCAN1_ID2_VALUE 0xb9
+
+/* PLD mode register SJA1000 IO enable bit */
+#define TSCAN1_MODE_ENABLE 0x40
+
+/* PLD jumpers register bits */
+#define TSCAN1_JP4 0x10
+#define TSCAN1_JP5 0x20
+
+/* PLD IO base addresses start */
+#define TSCAN1_PLD_ADDRESS 0x150
+
+/* PLD register space size */
+#define TSCAN1_PLD_SIZE 8
+
+/* SJA1000 register space size */
+#define TSCAN1_SJA1000_SIZE 32
+
+/* SJA1000 crystal frequency (16MHz) */
+#define TSCAN1_SJA1000_XTAL 16000000
+
+/* SJA1000 IO base addresses */
+static const unsigned short tscan1_sja1000_addresses[] __devinitconst = {
+       0x100, 0x120, 0x180, 0x1a0, 0x200, 0x240, 0x280, 0x320
+};
+
+/* Read SJA1000 register */
+static u8 tscan1_read(const struct sja1000_priv *priv, int reg)
+{
+       return inb((unsigned long)priv->reg_base + reg);
+}
+
+/* Write SJA1000 register */
+static void tscan1_write(const struct sja1000_priv *priv, int reg, u8 val)
+{
+       outb(val, (unsigned long)priv->reg_base + reg);
+}
+
+/* Probe for a TS-CAN1 board with JP2:JP1 jumper setting ID */
+static int __devinit tscan1_probe(struct device *dev, unsigned id)
+{
+       struct net_device *netdev;
+       struct sja1000_priv *priv;
+       unsigned long pld_base, sja1000_base;
+       int irq, i;
+
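+       /* Each JP2:JP1 setting selects its own PLD register window of TSCAN1_PLD_SIZE bytes starting at TSCAN1_PLD_ADDRESS. */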
+       pld_base = TSCAN1_PLD_ADDRESS + id * TSCAN1_PLD_SIZE;
+       if (!request_region(pld_base, TSCAN1_PLD_SIZE, dev_name(dev)))
+               return -EBUSY;
+
+       if (inb(pld_base + TSCAN1_ID1) != TSCAN1_ID1_VALUE ||
+           inb(pld_base + TSCAN1_ID2) != TSCAN1_ID2_VALUE) {
+               release_region(pld_base, TSCAN1_PLD_SIZE);
+               return -ENODEV;
+       }
+
+       switch (inb(pld_base + TSCAN1_JUMPERS) & (TSCAN1_JP4 | TSCAN1_JP5)) {
+       case TSCAN1_JP4:
+               irq = 6;
+               break;
+       case TSCAN1_JP5:
+               irq = 7;
+               break;
+       case TSCAN1_JP4 | TSCAN1_JP5:
+               irq = 5;
+               break;
+       default:
+               dev_err(dev, "invalid JP4:JP5 setting (no IRQ)\n");
+               release_region(pld_base, TSCAN1_PLD_SIZE);
+               return -EINVAL;
+       }
+
+       netdev = alloc_sja1000dev(0);
+       if (!netdev) {
+               release_region(pld_base, TSCAN1_PLD_SIZE);
+               return -ENOMEM;
+       }
+
+       dev_set_drvdata(dev, netdev);
+       SET_NETDEV_DEV(netdev, dev);
+
+       netdev->base_addr = pld_base;
+       netdev->irq = irq;
+
+       priv = netdev_priv(netdev);
+       priv->read_reg = tscan1_read;
+       priv->write_reg = tscan1_write;
+       priv->can.clock.freq = TSCAN1_SJA1000_XTAL / 2;
+       priv->cdr = CDR_CBP | CDR_CLK_OFF;
+       priv->ocr = OCR_TX0_PUSHPULL;
+
+       /* Select the first SJA1000 IO address that is free and that works */
+       for (i = 0; i < ARRAY_SIZE(tscan1_sja1000_addresses); i++) {
+               sja1000_base = tscan1_sja1000_addresses[i];
+               if (!request_region(sja1000_base, TSCAN1_SJA1000_SIZE,
+                                                               dev_name(dev)))
+                       continue;
+
+               /* Set SJA1000 IO base address and enable it */
+               outb(TSCAN1_MODE_ENABLE | i, pld_base + TSCAN1_MODE);
+
+               priv->reg_base = (void __iomem *)sja1000_base;
+               if (!register_sja1000dev(netdev)) {
+                       /* SJA1000 probe succeeded; turn LED off and return */
+                       outb(0, pld_base + TSCAN1_LED);
+                       netdev_info(netdev, "TS-CAN1 at 0x%lx 0x%lx irq %d\n",
+                                               pld_base, sja1000_base, irq);
+                       return 0;
+               }
+
+               /* SJA1000 probe failed; release and try next address */
+               outb(0, pld_base + TSCAN1_MODE);
+               release_region(sja1000_base, TSCAN1_SJA1000_SIZE);
+       }
+
+       dev_err(dev, "failed to assign SJA1000 IO address\n");
+       dev_set_drvdata(dev, NULL);
+       free_sja1000dev(netdev);
+       release_region(pld_base, TSCAN1_PLD_SIZE);
+       return -ENXIO;
+}
+
+static int __devexit tscan1_remove(struct device *dev, unsigned id /*unused*/)
+{
+       struct net_device *netdev;
+       struct sja1000_priv *priv;
+       unsigned long pld_base, sja1000_base;
+
+       netdev = dev_get_drvdata(dev);
+       unregister_sja1000dev(netdev);
+       dev_set_drvdata(dev, NULL);
+
+       priv = netdev_priv(netdev);
+       pld_base = netdev->base_addr;
+       sja1000_base = (unsigned long)priv->reg_base;
+
+       outb(0, pld_base + TSCAN1_MODE);        /* disable SJA1000 IO space */
+
+       release_region(sja1000_base, TSCAN1_SJA1000_SIZE);
+       release_region(pld_base, TSCAN1_PLD_SIZE);
+
+       free_sja1000dev(netdev);
+
+       return 0;
+}
+
+static struct isa_driver tscan1_isa_driver = {
+       .probe = tscan1_probe,
+       .remove = __devexit_p(tscan1_remove),
+       .driver = {
+               .name = "tscan1",
+       },
+};
+
+static int __init tscan1_init(void)
+{
+       return isa_register_driver(&tscan1_isa_driver, TSCAN1_MAXDEV);
+}
+module_init(tscan1_init);
+
+static void __exit tscan1_exit(void)
+{
+       isa_unregister_driver(&tscan1_isa_driver);
+}
+module_exit(tscan1_exit);
index a04ce6a..4e3c123 100644 (file)
@@ -1266,11 +1266,13 @@ static int cxgb_up(struct adapter *adap)
        }
 
        if (!(adap->flags & QUEUES_BOUND)) {
-               err = bind_qsets(adap);
-               if (err) {
-                       CH_ERR(adap, "failed to bind qsets, err %d\n", err);
+               int ret = bind_qsets(adap);
+
+               if (ret < 0) {
+                       CH_ERR(adap, "failed to bind qsets, err %d\n", ret);
                        t3_intr_disable(adap);
                        free_irq_resources(adap);
+                       err = ret;
                        goto out;
                }
                adap->flags |= QUEUES_BOUND;
index eaa49e4..3d4253d 100644 (file)
@@ -281,7 +281,6 @@ struct sge_rspq;
 
 struct port_info {
        struct adapter *adapter;
-       struct vlan_group *vlan_grp;
        u16    viid;
        s16    xact_addr_filt;        /* index of exact MAC address filter */
        u16    rss_size;              /* size of VI's RSS table slice */
index 87054e0..f17703f 100644 (file)
@@ -403,7 +403,7 @@ static int link_start(struct net_device *dev)
         * that step explicitly.
         */
        ret = t4_set_rxmode(pi->adapter, mb, pi->viid, dev->mtu, -1, -1, -1,
-                           pi->vlan_grp != NULL, true);
+                           !!(dev->features & NETIF_F_HW_VLAN_RX), true);
        if (ret == 0) {
                ret = t4_change_mac(pi->adapter, mb, pi->viid,
                                    pi->xact_addr_filt, dev->dev_addr, true,
@@ -1881,7 +1881,24 @@ static int set_tso(struct net_device *dev, u32 value)
 
 static int set_flags(struct net_device *dev, u32 flags)
 {
-       return ethtool_op_set_flags(dev, flags, ETH_FLAG_RXHASH);
+       int err;
+       unsigned long old_feat = dev->features;
+
+       err = ethtool_op_set_flags(dev, flags, ETH_FLAG_RXHASH |
+                                  ETH_FLAG_RXVLAN | ETH_FLAG_TXVLAN);
+       if (err)
+               return err;
+
+       if ((old_feat ^ dev->features) & NETIF_F_HW_VLAN_RX) {
+               const struct port_info *pi = netdev_priv(dev);
+
+               err = t4_set_rxmode(pi->adapter, pi->adapter->fn, pi->viid, -1,
+                                   -1, -1, -1, !!(flags & ETH_FLAG_RXVLAN),
+                                   true);
+               if (err)
+                       dev->features = old_feat;
+       }
+       return err;
 }
 
 static int get_rss_table(struct net_device *dev, struct ethtool_rxfh_indir *p)
@@ -2842,15 +2859,6 @@ static int cxgb_set_mac_addr(struct net_device *dev, void *p)
        return 0;
 }
 
-static void vlan_rx_register(struct net_device *dev, struct vlan_group *grp)
-{
-       struct port_info *pi = netdev_priv(dev);
-
-       pi->vlan_grp = grp;
-       t4_set_rxmode(pi->adapter, pi->adapter->fn, pi->viid, -1, -1, -1, -1,
-                     grp != NULL, true);
-}
-
 #ifdef CONFIG_NET_POLL_CONTROLLER
 static void cxgb_netpoll(struct net_device *dev)
 {
@@ -2878,7 +2886,6 @@ static const struct net_device_ops cxgb4_netdev_ops = {
        .ndo_validate_addr    = eth_validate_addr,
        .ndo_do_ioctl         = cxgb_ioctl,
        .ndo_change_mtu       = cxgb_change_mtu,
-       .ndo_vlan_rx_register = vlan_rx_register,
 #ifdef CONFIG_NET_POLL_CONTROLLER
        .ndo_poll_controller  = cxgb_netpoll,
 #endif
@@ -3658,7 +3665,6 @@ static int __devinit init_one(struct pci_dev *pdev,
                pi->rx_offload = RX_CSO;
                pi->port_id = i;
                netif_carrier_off(netdev);
-               netif_tx_stop_all_queues(netdev);
                netdev->irq = pdev->irq;
 
                netdev->features |= NETIF_F_SG | TSO_FLAGS;
@@ -3730,6 +3736,7 @@ static int __devinit init_one(struct pci_dev *pdev,
 
                        __set_bit(i, &adapter->registered_device_map);
                        adapter->chan_map[adap2pinfo(adapter, i)->tx_chan] = i;
+                       netif_tx_stop_all_queues(adapter->port[i]);
                }
        }
        if (!adapter->registered_device_map) {
index 9967f3d..1702225 100644 (file)
@@ -1530,18 +1530,11 @@ static void do_gro(struct sge_eth_rxq *rxq, const struct pkt_gl *gl,
                skb->rxhash = (__force u32)pkt->rsshdr.hash_val;
 
        if (unlikely(pkt->vlan_ex)) {
-               struct port_info *pi = netdev_priv(rxq->rspq.netdev);
-               struct vlan_group *grp = pi->vlan_grp;
-
+               __vlan_hwaccel_put_tag(skb, ntohs(pkt->vlan));
                rxq->stats.vlan_ex++;
-               if (likely(grp)) {
-                       ret = vlan_gro_frags(&rxq->rspq.napi, grp,
-                                            ntohs(pkt->vlan));
-                       goto stats;
-               }
        }
        ret = napi_gro_frags(&rxq->rspq.napi);
-stats: if (ret == GRO_HELD)
+       if (ret == GRO_HELD)
                rxq->stats.lro_pkts++;
        else if (ret == GRO_MERGED || ret == GRO_MERGED_FREE)
                rxq->stats.lro_merged++;
@@ -1608,16 +1601,10 @@ int t4_ethrx_handler(struct sge_rspq *q, const __be64 *rsp,
                skb_checksum_none_assert(skb);
 
        if (unlikely(pkt->vlan_ex)) {
-               struct vlan_group *grp = pi->vlan_grp;
-
+               __vlan_hwaccel_put_tag(skb, ntohs(pkt->vlan));
                rxq->stats.vlan_ex++;
-               if (likely(grp))
-                       vlan_hwaccel_receive_skb(skb, grp, ntohs(pkt->vlan));
-               else
-                       dev_kfree_skb_any(skb);
-       } else
-               netif_receive_skb(skb);
-
+       }
+       netif_receive_skb(skb);
        return 0;
 }
 
index a117f2a..4686c39 100644 (file)
@@ -521,7 +521,7 @@ void e1000_down(struct e1000_adapter *adapter)
        e1000_clean_all_rx_rings(adapter);
 }
 
-void e1000_reinit_safe(struct e1000_adapter *adapter)
+static void e1000_reinit_safe(struct e1000_adapter *adapter)
 {
        while (test_and_set_bit(__E1000_RESETTING, &adapter->flags))
                msleep(1);
index 1321cb6..8e745e7 100644 (file)
@@ -396,7 +396,9 @@ struct ehea_port_res {
        int swqe_ll_count;
        u32 swqe_id_counter;
        u64 tx_packets;
+       u64 tx_bytes;
        u64 rx_packets;
+       u64 rx_bytes;
        u32 poll_counter;
        struct net_lro_mgr lro_mgr;
        struct net_lro_desc lro_desc[MAX_LRO_DESCRIPTORS];
index bb7d306..182b2a7 100644 (file)
@@ -330,7 +330,7 @@ static struct net_device_stats *ehea_get_stats(struct net_device *dev)
        struct ehea_port *port = netdev_priv(dev);
        struct net_device_stats *stats = &port->stats;
        struct hcp_ehea_port_cb2 *cb2;
-       u64 hret, rx_packets, tx_packets;
+       u64 hret, rx_packets, tx_packets, rx_bytes = 0, tx_bytes = 0;
        int i;
 
        memset(stats, 0, sizeof(*stats));
@@ -353,18 +353,22 @@ static struct net_device_stats *ehea_get_stats(struct net_device *dev)
                ehea_dump(cb2, sizeof(*cb2), "net_device_stats");
 
        rx_packets = 0;
-       for (i = 0; i < port->num_def_qps; i++)
+       for (i = 0; i < port->num_def_qps; i++) {
                rx_packets += port->port_res[i].rx_packets;
+               rx_bytes   += port->port_res[i].rx_bytes;
+       }
 
        tx_packets = 0;
-       for (i = 0; i < port->num_def_qps + port->num_add_tx_qps; i++)
+       for (i = 0; i < port->num_def_qps + port->num_add_tx_qps; i++) {
                tx_packets += port->port_res[i].tx_packets;
+               tx_bytes   += port->port_res[i].tx_bytes;
+       }
 
        stats->tx_packets = tx_packets;
        stats->multicast = cb2->rxmcp;
        stats->rx_errors = cb2->rxuerr;
-       stats->rx_bytes = cb2->rxo;
-       stats->tx_bytes = cb2->txo;
+       stats->rx_bytes = rx_bytes;
+       stats->tx_bytes = tx_bytes;
        stats->rx_packets = rx_packets;
 
 out_herr:
@@ -703,6 +707,7 @@ static int ehea_proc_rwqes(struct net_device *dev,
        int skb_arr_rq2_len = pr->rq2_skba.len;
        int skb_arr_rq3_len = pr->rq3_skba.len;
        int processed, processed_rq1, processed_rq2, processed_rq3;
+       u64 processed_bytes = 0;
        int wqe_index, last_wqe_index, rq, port_reset;
 
        processed = processed_rq1 = processed_rq2 = processed_rq3 = 0;
@@ -760,6 +765,7 @@ static int ehea_proc_rwqes(struct net_device *dev,
                                processed_rq3++;
                        }
 
+                       processed_bytes += skb->len;
                        ehea_proc_skb(pr, cqe, skb);
                } else {
                        pr->p_stats.poll_receive_errors++;
@@ -775,6 +781,7 @@ static int ehea_proc_rwqes(struct net_device *dev,
                lro_flush_all(&pr->lro_mgr);
 
        pr->rx_packets += processed;
+       pr->rx_bytes += processed_bytes;
 
        ehea_refill_rq1(pr, last_wqe_index, processed_rq1);
        ehea_refill_rq2(pr, processed_rq2);
@@ -1509,9 +1516,20 @@ static int ehea_init_port_res(struct ehea_port *port, struct ehea_port_res *pr,
        enum ehea_eq_type eq_type = EHEA_EQ;
        struct ehea_qp_init_attr *init_attr = NULL;
        int ret = -EIO;
+       u64 tx_bytes, rx_bytes, tx_packets, rx_packets;
+
+       tx_bytes = pr->tx_bytes;
+       tx_packets = pr->tx_packets;
+       rx_bytes = pr->rx_bytes;
+       rx_packets = pr->rx_packets;
 
        memset(pr, 0, sizeof(struct ehea_port_res));
 
+       pr->tx_bytes = tx_bytes;
+       pr->tx_packets = tx_packets;
+       pr->rx_bytes = rx_bytes;
+       pr->rx_packets = rx_packets;
+
        pr->port = port;
        spin_lock_init(&pr->xmit_lock);
        spin_lock_init(&pr->netif_queue);
@@ -2249,6 +2267,14 @@ static int ehea_start_xmit(struct sk_buff *skb, struct net_device *dev)
        memset(swqe, 0, SWQE_HEADER_SIZE);
        atomic_dec(&pr->swqe_avail);
 
+       if (vlan_tx_tag_present(skb)) {
+               swqe->tx_control |= EHEA_SWQE_VLAN_INSERT;
+               swqe->vlan_tag = vlan_tx_tag_get(skb);
+       }
+
+       pr->tx_packets++;
+       pr->tx_bytes += skb->len;
+
        if (skb->len <= SWQE3_MAX_IMM) {
                u32 sig_iv = port->sig_comp_iv;
                u32 swqe_num = pr->swqe_id_counter;
@@ -2279,11 +2305,6 @@ static int ehea_start_xmit(struct sk_buff *skb, struct net_device *dev)
        }
        pr->swqe_id_counter += 1;
 
-       if (vlan_tx_tag_present(skb)) {
-               swqe->tx_control |= EHEA_SWQE_VLAN_INSERT;
-               swqe->vlan_tag = vlan_tx_tag_get(skb);
-       }
-
        if (netif_msg_tx_queued(port)) {
                ehea_info("post swqe on QP %d", pr->qp->init_attr.qp_nr);
                ehea_dump(swqe, 512, "swqe");
@@ -2295,7 +2316,6 @@ static int ehea_start_xmit(struct sk_buff *skb, struct net_device *dev)
        }
 
        ehea_post_swqe(pr->qp, swqe);
-       pr->tx_packets++;
 
        if (unlikely(atomic_read(&pr->swqe_avail) <= 1)) {
                spin_lock_irqsave(&pr->netif_queue, flags);
index 4c4cc80..49e4ce1 100644 (file)
@@ -2511,7 +2511,7 @@ static int gfar_clean_tx_ring(struct gfar_priv_tx_q *tx_queue)
                                skb_recycle_check(skb, priv->rx_buffer_size +
                                        RXBUF_ALIGNMENT)) {
                        gfar_align_skb(skb);
-                       __skb_queue_head(&priv->rx_recycle, skb);
+                       skb_queue_head(&priv->rx_recycle, skb);
                } else
                        dev_kfree_skb_any(skb);
 
@@ -2594,7 +2594,7 @@ struct sk_buff * gfar_new_skb(struct net_device *dev)
        struct gfar_private *priv = netdev_priv(dev);
        struct sk_buff *skb = NULL;
 
-       skb = __skb_dequeue(&priv->rx_recycle);
+       skb = skb_dequeue(&priv->rx_recycle);
        if (!skb)
                skb = gfar_alloc_skb(dev);
 
@@ -2750,7 +2750,7 @@ int gfar_clean_rx_ring(struct gfar_priv_rx_q *rx_queue, int rx_work_limit)
                        if (unlikely(!newskb))
                                newskb = skb;
                        else if (skb)
-                               __skb_queue_head(&priv->rx_recycle, skb);
+                               skb_queue_head(&priv->rx_recycle, skb);
                } else {
                        /* Increment the number of packets */
                        rx_queue->stats.rx_packets++;
index d7a975e..d85edf3 100644 (file)
@@ -1623,12 +1623,12 @@ err_out:
        return rc;
 }
 
-#ifdef CONFIG_PM
 static void
 jme_set_100m_half(struct jme_adapter *jme)
 {
        u32 bmcr, tmp;
 
+       jme_phy_on(jme);
        bmcr = jme_mdio_read(jme->dev, jme->mii_if.phy_id, MII_BMCR);
        tmp = bmcr & ~(BMCR_ANENABLE | BMCR_SPEED100 |
                       BMCR_SPEED1000 | BMCR_FULLDPLX);
@@ -1656,7 +1656,6 @@ jme_wait_link(struct jme_adapter *jme)
                phylink = jme_linkstat_from_phy(jme);
        }
 }
-#endif
 
 static inline void
 jme_phy_off(struct jme_adapter *jme)
@@ -1664,6 +1663,21 @@ jme_phy_off(struct jme_adapter *jme)
        jme_mdio_write(jme->dev, jme->mii_if.phy_id, MII_BMCR, BMCR_PDOWN);
 }
 
+static void
+jme_powersave_phy(struct jme_adapter *jme)
+{
+       if (jme->reg_pmcs) {
+               jme_set_100m_half(jme);
+
+               if (jme->reg_pmcs & (PMCS_LFEN | PMCS_LREN))
+                       jme_wait_link(jme);
+
+               jwrite32(jme, JME_PMCS, jme->reg_pmcs);
+       } else {
+               jme_phy_off(jme);
+       }
+}
+
 static int
 jme_close(struct net_device *netdev)
 {
@@ -2991,6 +3005,16 @@ jme_remove_one(struct pci_dev *pdev)
 
 }
 
+static void
+jme_shutdown(struct pci_dev *pdev)
+{
+       struct net_device *netdev = pci_get_drvdata(pdev);
+       struct jme_adapter *jme = netdev_priv(netdev);
+
+       jme_powersave_phy(jme);
+       pci_pme_active(pdev, true);
+}
+
 #ifdef CONFIG_PM
 static int
 jme_suspend(struct pci_dev *pdev, pm_message_t state)
@@ -3028,19 +3052,9 @@ jme_suspend(struct pci_dev *pdev, pm_message_t state)
        tasklet_hi_enable(&jme->rxempty_task);
 
        pci_save_state(pdev);
-       if (jme->reg_pmcs) {
-               jme_set_100m_half(jme);
-
-               if (jme->reg_pmcs & (PMCS_LFEN | PMCS_LREN))
-                       jme_wait_link(jme);
-
-               jwrite32(jme, JME_PMCS, jme->reg_pmcs);
-
-               pci_enable_wake(pdev, PCI_D3cold, true);
-       } else {
-               jme_phy_off(jme);
-       }
-       pci_set_power_state(pdev, PCI_D3cold);
+       jme_powersave_phy(jme);
+       pci_enable_wake(jme->pdev, PCI_D3hot, true);
+       pci_set_power_state(pdev, PCI_D3hot);
 
        return 0;
 }
@@ -3087,6 +3101,7 @@ static struct pci_driver jme_driver = {
        .suspend        = jme_suspend,
        .resume         = jme_resume,
 #endif /* CONFIG_PM */
+       .shutdown       = jme_shutdown,
 };
 
 static int __init
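
The jme changes pull the WoL-oriented PHY shutdown into jme_powersave_phy() so the suspend path and the new PCI .shutdown hook share it; .shutdown additionally calls pci_pme_active() so the card can still raise a wake event after poweroff. A minimal sketch of that split (my_powersave() stands in for the driver-specific link forcing and wake-event programming; the legacy suspend callback form matches this tree):

#include <linux/netdevice.h>
#include <linux/pci.h>

static void my_powersave(struct net_device *netdev)
{
	/* driver-specific: force 100M/half for WoL, program wake events,
	 * or power the PHY down entirely when no wake source is armed */
}

static void my_shutdown(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);

	my_powersave(netdev);
	pci_pme_active(pdev, true);	/* keep PME armed after poweroff */
}

static int my_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct net_device *netdev = pci_get_drvdata(pdev);

	pci_save_state(pdev);
	my_powersave(netdev);
	pci_enable_wake(pdev, PCI_D3hot, true);
	pci_set_power_state(pdev, PCI_D3hot);
	return 0;
}

static struct pci_driver my_driver = {
	/* .name, .id_table, .probe and .remove omitted */
	.suspend	= my_suspend,
	.shutdown	= my_shutdown,
};
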
index 4297f6e..f69e73e 100644 (file)
@@ -515,14 +515,15 @@ static int macb_poll(struct napi_struct *napi, int budget)
                (unsigned long)status, budget);
 
        work_done = macb_rx(bp, budget);
-       if (work_done < budget)
+       if (work_done < budget) {
                napi_complete(napi);
 
-       /*
-        * We've done what we can to clean the buffers. Make sure we
-        * get notified when new packets arrive.
-        */
-       macb_writel(bp, IER, MACB_RX_INT_FLAGS);
+               /*
+                * We've done what we can to clean the buffers. Make sure we
+                * get notified when new packets arrive.
+                */
+               macb_writel(bp, IER, MACB_RX_INT_FLAGS);
+       }
 
        /* TODO: Handle errors */
 
@@ -550,12 +551,16 @@ static irqreturn_t macb_interrupt(int irq, void *dev_id)
                }
 
                if (status & MACB_RX_INT_FLAGS) {
+                       /*
+                        * There's no point taking any more interrupts
+                        * until we have processed the buffers. The
+                        * scheduling call may fail if the poll routine
+                        * is already scheduled, so disable interrupts
+                        * now.
+                        */
+                       macb_writel(bp, IDR, MACB_RX_INT_FLAGS);
+
                        if (napi_schedule_prep(&bp->napi)) {
-                               /*
-                                * There's no point taking any more interrupts
-                                * until we have processed the buffers
-                                */
-                               macb_writel(bp, IDR, MACB_RX_INT_FLAGS);
                                dev_dbg(&bp->pdev->dev,
                                        "scheduling RX softirq\n");
                                __napi_schedule(&bp->napi);
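
The macb hunks restore the standard NAPI contract: the interrupt handler masks the RX source before it even tries to schedule the poller (napi_schedule_prep() can legitimately fail when polling is already scheduled, and the interrupt must still be silenced), and the poll routine only re-enables the source after napi_complete() when it finished under budget. A schematic version, where my_mask_rx(), my_unmask_rx(), my_rx_pending() and my_process_rx() stand in for the device-specific IDR/IER writes and descriptor processing:

#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/netdevice.h>
#include <linux/types.h>

struct my_priv {
	struct napi_struct napi;
};

/* device-specific helpers, assumed to exist elsewhere in the driver */
static void my_mask_rx(struct my_priv *priv);
static void my_unmask_rx(struct my_priv *priv);
static bool my_rx_pending(struct my_priv *priv);
static int my_process_rx(struct my_priv *priv, int budget);

static irqreturn_t my_isr(int irq, void *dev_id)
{
	struct my_priv *priv = dev_id;

	if (my_rx_pending(priv)) {
		my_mask_rx(priv);	/* no further RX irqs until polled */
		if (napi_schedule_prep(&priv->napi))
			__napi_schedule(&priv->napi);
	}
	return IRQ_HANDLED;
}

static int my_poll(struct napi_struct *napi, int budget)
{
	struct my_priv *priv = container_of(napi, struct my_priv, napi);
	int work_done = my_process_rx(priv, budget);

	if (work_done < budget) {
		napi_complete(napi);
		my_unmask_rx(priv);	/* only once the backlog is drained */
	}
	return work_done;
}
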
index b07e4de..02393fd 100644 (file)
@@ -210,38 +210,12 @@ static int mlx4_MAP_ICM(struct mlx4_dev *dev, struct mlx4_icm *icm, u64 virt)
        return mlx4_map_cmd(dev, MLX4_CMD_MAP_ICM, icm, virt);
 }
 
-int mlx4_UNMAP_ICM(struct mlx4_dev *dev, u64 virt, u32 page_count)
+static int mlx4_UNMAP_ICM(struct mlx4_dev *dev, u64 virt, u32 page_count)
 {
        return mlx4_cmd(dev, virt, page_count, 0, MLX4_CMD_UNMAP_ICM,
                        MLX4_CMD_TIME_CLASS_B);
 }
 
-int mlx4_MAP_ICM_page(struct mlx4_dev *dev, u64 dma_addr, u64 virt)
-{
-       struct mlx4_cmd_mailbox *mailbox;
-       __be64 *inbox;
-       int err;
-
-       mailbox = mlx4_alloc_cmd_mailbox(dev);
-       if (IS_ERR(mailbox))
-               return PTR_ERR(mailbox);
-       inbox = mailbox->buf;
-
-       inbox[0] = cpu_to_be64(virt);
-       inbox[1] = cpu_to_be64(dma_addr);
-
-       err = mlx4_cmd(dev, mailbox->dma, 1, 0, MLX4_CMD_MAP_ICM,
-                      MLX4_CMD_TIME_CLASS_B);
-
-       mlx4_free_cmd_mailbox(dev, mailbox);
-
-       if (!err)
-               mlx4_dbg(dev, "Mapped page at %llx to %llx for ICM.\n",
-                         (unsigned long long) dma_addr, (unsigned long long) virt);
-
-       return err;
-}
-
 int mlx4_MAP_ICM_AUX(struct mlx4_dev *dev, struct mlx4_icm *icm)
 {
        return mlx4_map_cmd(dev, MLX4_CMD_MAP_ICM_AUX, icm, -1);
index ab56a2f..b10c07a 100644 (file)
@@ -128,8 +128,6 @@ static inline unsigned long mlx4_icm_size(struct mlx4_icm_iter *iter)
        return sg_dma_len(&iter->chunk->mem[iter->page_idx]);
 }
 
-int mlx4_UNMAP_ICM(struct mlx4_dev *dev, u64 virt, u32 page_count);
-int mlx4_MAP_ICM_page(struct mlx4_dev *dev, u64 dma_addr, u64 virt);
 int mlx4_MAP_ICM_AUX(struct mlx4_dev *dev, struct mlx4_icm *icm);
 int mlx4_UNMAP_ICM_AUX(struct mlx4_dev *dev);
 
index 56371ef..4513395 100644 (file)
@@ -111,6 +111,12 @@ int mlx4_register_mac(struct mlx4_dev *dev, u8 port, u64 mac, int *index)
                        goto out;
                }
        }
+
+       if (free < 0) {
+               err = -ENOMEM;
+               goto out;
+       }
+
        mlx4_dbg(dev, "Free MAC index is %d\n", free);
 
        if (table->total == table->max) {
@@ -224,6 +230,11 @@ int mlx4_register_vlan(struct mlx4_dev *dev, u8 port, u16 vlan, int *index)
                }
        }
 
+       if (free < 0) {
+               err = -ENOMEM;
+               goto out;
+       }
+
        if (table->total == table->max) {
                /* No free vlan entries */
                err = -ENOSPC;
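
The two mlx4 port-table hunks add the check that was missing after the scan loops: if no entry matched and no empty slot was seen, free is still negative and must not be used as an index. The core of that pattern, reduced to a plain array (layout and names are illustrative, not mlx4's):

#include <linux/errno.h>
#include <linux/types.h>

static int my_table_add(u64 *entries, int size, u64 value)
{
	int free = -1;
	int i;

	for (i = 0; i < size; i++) {
		if (free < 0 && !entries[i])
			free = i;	/* remember the first empty slot */
		if (entries[i] == value)
			return i;	/* already registered */
	}

	if (free < 0)
		return -ENOMEM;		/* table full: never index entries[free] */

	entries[free] = value;
	return free;
}
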
index 1bb16cb..7670aac 100644 (file)
@@ -65,7 +65,7 @@ EXPORT_SYMBOL(phy_print_status);
  *
  * Returns 0 on success on < 0 on error.
  */
-int phy_clear_interrupt(struct phy_device *phydev)
+static int phy_clear_interrupt(struct phy_device *phydev)
 {
        int err = 0;
 
@@ -82,7 +82,7 @@ int phy_clear_interrupt(struct phy_device *phydev)
  *
  * Returns 0 on success on < 0 on error.
  */
-int phy_config_interrupt(struct phy_device *phydev, u32 interrupts)
+static int phy_config_interrupt(struct phy_device *phydev, u32 interrupts)
 {
        int err = 0;
 
@@ -208,7 +208,7 @@ static inline int phy_find_valid(int idx, u32 features)
  *   duplexes.  Drop down by one in this order:  1000/FULL,
  *   1000/HALF, 100/FULL, 100/HALF, 10/FULL, 10/HALF.
  */
-void phy_sanitize_settings(struct phy_device *phydev)
+static void phy_sanitize_settings(struct phy_device *phydev)
 {
        u32 features = phydev->supported;
        int idx;
@@ -223,7 +223,6 @@ void phy_sanitize_settings(struct phy_device *phydev)
        phydev->speed = settings[idx].speed;
        phydev->duplex = settings[idx].duplex;
 }
-EXPORT_SYMBOL(phy_sanitize_settings);
 
 /**
  * phy_ethtool_sset - generic ethtool sset function, handles all the details
@@ -532,7 +531,7 @@ static irqreturn_t phy_interrupt(int irq, void *phy_dat)
  * phy_enable_interrupts - Enable the interrupts from the PHY side
  * @phydev: target phy_device struct
  */
-int phy_enable_interrupts(struct phy_device *phydev)
+static int phy_enable_interrupts(struct phy_device *phydev)
 {
        int err;
 
@@ -545,13 +544,12 @@ int phy_enable_interrupts(struct phy_device *phydev)
 
        return err;
 }
-EXPORT_SYMBOL(phy_enable_interrupts);
 
 /**
  * phy_disable_interrupts - Disable the PHY interrupts from the PHY side
  * @phydev: target phy_device struct
  */
-int phy_disable_interrupts(struct phy_device *phydev)
+static int phy_disable_interrupts(struct phy_device *phydev)
 {
        int err;
 
@@ -574,7 +572,6 @@ phy_err:
 
        return err;
 }
-EXPORT_SYMBOL(phy_disable_interrupts);
 
 /**
  * phy_start_interrupts - request and enable interrupts for a PHY device
index 16ddc77..993c52c 100644 (file)
@@ -57,6 +57,9 @@ extern void mdio_bus_exit(void);
 static LIST_HEAD(phy_fixup_list);
 static DEFINE_MUTEX(phy_fixup_lock);
 
+static int phy_attach_direct(struct net_device *dev, struct phy_device *phydev,
+                            u32 flags, phy_interface_t interface);
+
 /*
  * Creates a new phy_fixup and adds it to the list
  * @bus_id: A string which matches phydev->dev.bus_id (or PHY_ANY_ID)
@@ -146,7 +149,8 @@ int phy_scan_fixups(struct phy_device *phydev)
 }
 EXPORT_SYMBOL(phy_scan_fixups);
 
-struct phy_device* phy_device_create(struct mii_bus *bus, int addr, int phy_id)
+static struct phy_device* phy_device_create(struct mii_bus *bus,
+                                           int addr, int phy_id)
 {
        struct phy_device *dev;
 
@@ -193,7 +197,6 @@ struct phy_device* phy_device_create(struct mii_bus *bus, int addr, int phy_id)
 
        return dev;
 }
-EXPORT_SYMBOL(phy_device_create);
 
 /**
  * get_phy_id - reads the specified addr for its ID.
@@ -316,7 +319,7 @@ EXPORT_SYMBOL(phy_find_first);
  *   If you want to monitor your own link state, don't call
  *   this function.
  */
-void phy_prepare_link(struct phy_device *phydev,
+static void phy_prepare_link(struct phy_device *phydev,
                void (*handler)(struct net_device *))
 {
        phydev->adjust_link = handler;
@@ -435,8 +438,8 @@ int phy_init_hw(struct phy_device *phydev)
  *     the attaching device, and given a callback for link status
  *     change.  The phy_device is returned to the attaching driver.
  */
-int phy_attach_direct(struct net_device *dev, struct phy_device *phydev,
-                     u32 flags, phy_interface_t interface)
+static int phy_attach_direct(struct net_device *dev, struct phy_device *phydev,
+                            u32 flags, phy_interface_t interface)
 {
        struct device *d = &phydev->dev;
 
@@ -473,7 +476,6 @@ int phy_attach_direct(struct net_device *dev, struct phy_device *phydev,
         * (dev_flags and interface) */
        return phy_init_hw(phydev);
 }
-EXPORT_SYMBOL(phy_attach_direct);
 
 /**
  * phy_attach - attach a network device to a particular PHY device
@@ -540,7 +542,7 @@ EXPORT_SYMBOL(phy_detach);
  *   what is supported.  Returns < 0 on error, 0 if the PHY's advertisement
  *   hasn't changed, and > 0 if it has changed.
  */
-int genphy_config_advert(struct phy_device *phydev)
+static int genphy_config_advert(struct phy_device *phydev)
 {
        u32 advertise;
        int oldadv, adv;
@@ -605,7 +607,6 @@ int genphy_config_advert(struct phy_device *phydev)
 
        return changed;
 }
-EXPORT_SYMBOL(genphy_config_advert);
 
 /**
  * genphy_setup_forced - configures/forces speed/duplex from @phydev
@@ -615,7 +616,7 @@ EXPORT_SYMBOL(genphy_config_advert);
  *   to the values in phydev. Assumes that the values are valid.
  *   Please see phy_sanitize_settings().
  */
-int genphy_setup_forced(struct phy_device *phydev)
+static int genphy_setup_forced(struct phy_device *phydev)
 {
        int err;
        int ctl = 0;
index 26c37d3..8ecc170 100644 (file)
 #define MAX_CMD_DESCRIPTORS            1024
 #define MAX_RCV_DESCRIPTORS_1G         4096
 #define MAX_RCV_DESCRIPTORS_10G        8192
+#define MAX_RCV_DESCRIPTORS_VF         2048
 #define MAX_JUMBO_RCV_DESCRIPTORS_1G   512
 #define MAX_JUMBO_RCV_DESCRIPTORS_10G  1024
 
 #define DEFAULT_RCV_DESCRIPTORS_1G     2048
 #define DEFAULT_RCV_DESCRIPTORS_10G    4096
+#define DEFAULT_RCV_DESCRIPTORS_VF     1024
 #define MAX_RDS_RINGS                   2
 
 #define get_next_index(index, length)  \
@@ -942,6 +944,7 @@ struct qlcnic_ipaddr {
 #define QLCNIC_LOOPBACK_TEST           2
 
 #define QLCNIC_FILTER_AGE      80
+#define QLCNIC_READD_AGE       20
 #define QLCNIC_LB_MAX_FILTERS  64
 
 struct qlcnic_filter {
@@ -970,6 +973,8 @@ struct qlcnic_adapter {
        u16 num_txd;
        u16 num_rxd;
        u16 num_jumbo_rxd;
+       u16 max_rxd;
+       u16 max_jumbo_rxd;
 
        u8 max_rds_rings;
        u8 max_sds_rings;
@@ -1129,7 +1134,7 @@ struct qlcnic_eswitch {
 #define MAX_RX_QUEUES          4
 #define DEFAULT_MAC_LEARN      1
 
-#define IS_VALID_VLAN(vlan)    (vlan >= MIN_VLAN_ID && vlan <= MAX_VLAN_ID)
+#define IS_VALID_VLAN(vlan)    (vlan >= MIN_VLAN_ID && vlan < MAX_VLAN_ID)
 #define IS_VALID_BW(bw)                (bw >= MIN_BW && bw <= MAX_BW)
 #define IS_VALID_TX_QUEUES(que)        (que > 0 && que <= MAX_TX_QUEUES)
 #define IS_VALID_RX_QUEUES(que)        (que > 0 && que <= MAX_RX_QUEUES)
index 25e93a5..ec21d24 100644 (file)
@@ -437,14 +437,8 @@ qlcnic_get_ringparam(struct net_device *dev,
        ring->rx_jumbo_pending = adapter->num_jumbo_rxd;
        ring->tx_pending = adapter->num_txd;
 
-       if (adapter->ahw.port_type == QLCNIC_GBE) {
-               ring->rx_max_pending = MAX_RCV_DESCRIPTORS_1G;
-               ring->rx_jumbo_max_pending = MAX_JUMBO_RCV_DESCRIPTORS_1G;
-       } else {
-               ring->rx_max_pending = MAX_RCV_DESCRIPTORS_10G;
-               ring->rx_jumbo_max_pending = MAX_JUMBO_RCV_DESCRIPTORS_10G;
-       }
-
+       ring->rx_max_pending = adapter->max_rxd;
+       ring->rx_jumbo_max_pending = adapter->max_jumbo_rxd;
        ring->tx_max_pending = MAX_CMD_DESCRIPTORS;
 
        ring->rx_mini_max_pending = 0;
@@ -472,24 +466,17 @@ qlcnic_set_ringparam(struct net_device *dev,
                struct ethtool_ringparam *ring)
 {
        struct qlcnic_adapter *adapter = netdev_priv(dev);
-       u16 max_rcv_desc = MAX_RCV_DESCRIPTORS_10G;
-       u16 max_jumbo_desc = MAX_JUMBO_RCV_DESCRIPTORS_10G;
        u16 num_rxd, num_jumbo_rxd, num_txd;
 
-
        if (ring->rx_mini_pending)
                return -EOPNOTSUPP;
 
-       if (adapter->ahw.port_type == QLCNIC_GBE) {
-               max_rcv_desc = MAX_RCV_DESCRIPTORS_1G;
-               max_jumbo_desc = MAX_JUMBO_RCV_DESCRIPTORS_10G;
-       }
-
        num_rxd = qlcnic_validate_ringparam(ring->rx_pending,
-                       MIN_RCV_DESCRIPTORS, max_rcv_desc, "rx");
+                       MIN_RCV_DESCRIPTORS, adapter->max_rxd, "rx");
 
        num_jumbo_rxd = qlcnic_validate_ringparam(ring->rx_jumbo_pending,
-                       MIN_JUMBO_DESCRIPTORS, max_jumbo_desc, "rx jumbo");
+                       MIN_JUMBO_DESCRIPTORS, adapter->max_jumbo_rxd,
+                                               "rx jumbo");
 
        num_txd = qlcnic_validate_ringparam(ring->tx_pending,
                        MIN_CMD_DESCRIPTORS, MAX_CMD_DESCRIPTORS, "tx");
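
With max_rxd and max_jumbo_rxd stored in the adapter at probe time, both ethtool callbacks use the same bounds instead of re-deriving them from the port type each call (note that the removed GBE branch above had picked the 10G jumbo limit). A schematic get/set pair built on adapter-stored maxima, with illustrative names and the two-argument callback signatures this tree uses:

#include <linux/errno.h>
#include <linux/ethtool.h>
#include <linux/netdevice.h>

struct my_adapter {
	u16 num_rxd;
	u16 max_rxd;	/* chosen once at probe from the port type */
};

static void my_get_ringparam(struct net_device *dev,
			     struct ethtool_ringparam *ring)
{
	struct my_adapter *adapter = netdev_priv(dev);

	ring->rx_pending = adapter->num_rxd;
	ring->rx_max_pending = adapter->max_rxd;
}

static int my_set_ringparam(struct net_device *dev,
			    struct ethtool_ringparam *ring)
{
	struct my_adapter *adapter = netdev_priv(dev);

	if (!ring->rx_pending || ring->rx_pending > adapter->max_rxd)
		return -EINVAL;

	adapter->num_rxd = ring->rx_pending;
	return 0;	/* a real driver would also rebuild the rings here */
}
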
index f047c7c..7a298cd 100644 (file)
@@ -656,13 +656,23 @@ qlcnic_check_options(struct qlcnic_adapter *adapter)
 
        dev_info(&pdev->dev, "firmware v%d.%d.%d\n",
                        fw_major, fw_minor, fw_build);
-
        if (adapter->ahw.port_type == QLCNIC_XGBE) {
-               adapter->num_rxd = DEFAULT_RCV_DESCRIPTORS_10G;
+               if (adapter->flags & QLCNIC_ESWITCH_ENABLED) {
+                       adapter->num_rxd = DEFAULT_RCV_DESCRIPTORS_VF;
+                       adapter->max_rxd = MAX_RCV_DESCRIPTORS_VF;
+               } else {
+                       adapter->num_rxd = DEFAULT_RCV_DESCRIPTORS_10G;
+                       adapter->max_rxd = MAX_RCV_DESCRIPTORS_10G;
+               }
+
                adapter->num_jumbo_rxd = MAX_JUMBO_RCV_DESCRIPTORS_10G;
+               adapter->max_jumbo_rxd = MAX_JUMBO_RCV_DESCRIPTORS_10G;
+
        } else if (adapter->ahw.port_type == QLCNIC_GBE) {
                adapter->num_rxd = DEFAULT_RCV_DESCRIPTORS_1G;
                adapter->num_jumbo_rxd = MAX_JUMBO_RCV_DESCRIPTORS_1G;
+               adapter->max_jumbo_rxd = MAX_JUMBO_RCV_DESCRIPTORS_1G;
+               adapter->max_rxd = MAX_RCV_DESCRIPTORS_1G;
        }
 
        adapter->msix_supported = !!use_msi_x;
@@ -1860,6 +1870,11 @@ qlcnic_send_filter(struct qlcnic_adapter *adapter,
        hlist_for_each_entry_safe(tmp_fil, tmp_hnode, n, head, fnode) {
                if (!memcmp(tmp_fil->faddr, &src_addr, ETH_ALEN) &&
                            tmp_fil->vlan_id == vlan_id) {
+
+                       if (jiffies >
+                           (QLCNIC_READD_AGE * HZ + tmp_fil->ftime))
+                               qlcnic_change_filter(adapter, src_addr, vlan_id,
+                                                               tx_ring);
                        tmp_fil->ftime = jiffies;
                        return;
                }
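
The qlcnic_send_filter() hunk re-programs an existing MAC filter once its timestamp is older than QLCNIC_READD_AGE seconds, then refreshes the timestamp. Ages expressed in jiffies are conventionally compared with time_after(), which stays correct across a jiffies wrap; a generic sketch of the same check (my_entry and my_refresh_hw are illustrative, not qlcnic symbols):

#include <linux/jiffies.h>

#define MY_READD_AGE	20	/* seconds */

struct my_entry {
	unsigned long ftime;	/* jiffies when last seen */
};

static void my_refresh_hw(struct my_entry *e);	/* reprogram the HW filter */

static void my_touch_entry(struct my_entry *e)
{
	if (time_after(jiffies, e->ftime + MY_READD_AGE * HZ))
		my_refresh_hw(e);	/* entry aged out in hardware, add it again */

	e->ftime = jiffies;
}
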
index a478786..2282139 100644 (file)
@@ -2226,7 +2226,6 @@ int ql_dump_risc_ram_area(struct ql_adapter *qdev, void *buf,
 int ql_core_dump(struct ql_adapter *qdev,
                struct ql_mpi_coredump *mpi_coredump);
 int ql_mb_about_fw(struct ql_adapter *qdev);
-int ql_wol(struct ql_adapter *qdev);
 int ql_mb_wol_set_magic(struct ql_adapter *qdev, u32 enable_wol);
 int ql_mb_wol_mode(struct ql_adapter *qdev, u32 wol);
 int ql_mb_set_led_cfg(struct ql_adapter *qdev, u32 led_config);
@@ -2243,16 +2242,13 @@ netdev_tx_t ql_lb_send(struct sk_buff *skb, struct net_device *ndev);
 void ql_check_lb_frame(struct ql_adapter *, struct sk_buff *);
 int ql_own_firmware(struct ql_adapter *qdev);
 int ql_clean_lb_rx_ring(struct rx_ring *rx_ring, int budget);
-void qlge_set_multicast_list(struct net_device *ndev);
 
-#if 1
-#define QL_ALL_DUMP
-#define QL_REG_DUMP
-#define QL_DEV_DUMP
-#define QL_CB_DUMP
+/* #define QL_ALL_DUMP */
+/* #define QL_REG_DUMP */
+/* #define QL_DEV_DUMP */
+/* #define QL_CB_DUMP */
 /* #define QL_IB_DUMP */
 /* #define QL_OB_DUMP */
-#endif
 
 #ifdef QL_REG_DUMP
 extern void ql_dump_xgmac_control_regs(struct ql_adapter *qdev);
index ba0053d..c30e0fe 100644 (file)
@@ -94,6 +94,9 @@ static DEFINE_PCI_DEVICE_TABLE(qlge_pci_tbl) = {
 
 MODULE_DEVICE_TABLE(pci, qlge_pci_tbl);
 
+static int ql_wol(struct ql_adapter *qdev);
+static void qlge_set_multicast_list(struct net_device *ndev);
+
 /* This hardware semaphore causes exclusive access to
  * resources shared between the NIC driver, MPI firmware,
  * FCOE firmware and the FC driver.
@@ -2382,6 +2385,20 @@ static void qlge_vlan_rx_kill_vid(struct net_device *ndev, u16 vid)
 
 }
 
+static void qlge_restore_vlan(struct ql_adapter *qdev)
+{
+       qlge_vlan_rx_register(qdev->ndev, qdev->vlgrp);
+
+       if (qdev->vlgrp) {
+               u16 vid;
+               for (vid = 0; vid < VLAN_N_VID; vid++) {
+                       if (!vlan_group_get_device(qdev->vlgrp, vid))
+                               continue;
+                       qlge_vlan_rx_add_vid(qdev->ndev, vid);
+               }
+       }
+}
+
 /* MSI-X Multiple Vector Interrupt Handler for inbound completions. */
 static irqreturn_t qlge_msix_rx_isr(int irq, void *dev_id)
 {
@@ -3842,7 +3859,7 @@ static void ql_display_dev_info(struct net_device *ndev)
                   "MAC address %pM\n", ndev->dev_addr);
 }
 
-int ql_wol(struct ql_adapter *qdev)
+static int ql_wol(struct ql_adapter *qdev)
 {
        int status = 0;
        u32 wol = MB_WOL_DISABLE;
@@ -3957,6 +3974,9 @@ static int ql_adapter_up(struct ql_adapter *qdev)
        clear_bit(QL_PROMISCUOUS, &qdev->flags);
        qlge_set_multicast_list(qdev->ndev);
 
+       /* Restore vlan setting. */
+       qlge_restore_vlan(qdev);
+
        ql_enable_interrupts(qdev);
        ql_enable_all_completion_interrupts(qdev);
        netif_tx_start_all_queues(qdev->ndev);
@@ -4242,7 +4262,7 @@ static struct net_device_stats *qlge_get_stats(struct net_device
        return &ndev->stats;
 }
 
-void qlge_set_multicast_list(struct net_device *ndev)
+static void qlge_set_multicast_list(struct net_device *ndev)
 {
        struct ql_adapter *qdev = (struct ql_adapter *)netdev_priv(ndev);
        struct netdev_hw_addr *ha;
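
qlge_restore_vlan(), added above and called from ql_adapter_up(), replays the software VLAN state into the hardware after a reset: re-register the group, then walk every possible VID and re-add the ones that have a device attached. The walk in isolation, using the vlan_group/vlan_group_get_device API of this kernel generation (add_vid stands in for the driver's rx_add_vid handler):

#include <linux/if_vlan.h>
#include <linux/netdevice.h>

static void my_restore_vlan(struct net_device *ndev, struct vlan_group *grp,
			    void (*add_vid)(struct net_device *ndev, u16 vid))
{
	u16 vid;

	if (!grp)
		return;

	for (vid = 0; vid < VLAN_N_VID; vid++) {
		if (!vlan_group_get_device(grp, vid))
			continue;	/* VID not configured */
		add_vid(ndev, vid);	/* reprogram it in hardware */
	}
}
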
index f84e857..0e7c7c7 100644 (file)
@@ -87,7 +87,7 @@ exit:
        return status;
 }
 
-int ql_soft_reset_mpi_risc(struct ql_adapter *qdev)
+static int ql_soft_reset_mpi_risc(struct ql_adapter *qdev)
 {
        int status;
        status = ql_write_mpi_reg(qdev, 0x00001010, 1);
@@ -681,7 +681,7 @@ int ql_mb_get_fw_state(struct ql_adapter *qdev)
 /* Send and ACK mailbox command to the firmware to
  * let it continue with the change.
  */
-int ql_mb_idc_ack(struct ql_adapter *qdev)
+static int ql_mb_idc_ack(struct ql_adapter *qdev)
 {
        struct mbox_params mbc;
        struct mbox_params *mbcp = &mbc;
@@ -744,7 +744,7 @@ int ql_mb_set_port_cfg(struct ql_adapter *qdev)
        return status;
 }
 
-int ql_mb_dump_ram(struct ql_adapter *qdev, u64 req_dma, u32 addr,
+static int ql_mb_dump_ram(struct ql_adapter *qdev, u64 req_dma, u32 addr,
        u32 size)
 {
        int status = 0;
index a9ae505..66c2f1a 100644 (file)
@@ -961,9 +961,9 @@ sb1000_open(struct net_device *dev)
        lp->rx_error_count = 0;
        lp->rx_error_dpc_count = 0;
        lp->rx_session_id[0] = 0x50;
-       lp->rx_session_id[0] = 0x48;
-       lp->rx_session_id[0] = 0x44;
-       lp->rx_session_id[0] = 0x42;
+       lp->rx_session_id[1] = 0x48;
+       lp->rx_session_id[2] = 0x44;
+       lp->rx_session_id[3] = 0x42;
        lp->rx_frame_id[0] = 0;
        lp->rx_frame_id[1] = 0;
        lp->rx_frame_id[2] = 0;
index 9265315..3a0cc63 100644 (file)
@@ -531,7 +531,7 @@ static int sgiseeq_open(struct net_device *dev)
 
        if (request_irq(irq, sgiseeq_interrupt, 0, sgiseeqstr, dev)) {
                printk(KERN_ERR "Seeq8003: Can't get irq %d\n", dev->irq);
-               err = -EAGAIN;
+               return -EAGAIN;
        }
 
        err = init_seeq(dev, sp, sregs);
index ac279fa..ab9e3b7 100644 (file)
@@ -688,18 +688,8 @@ slhc_toss(struct slcompress *comp)
        return 0;
 }
 
-
-/* VJ header compression */
-EXPORT_SYMBOL(slhc_init);
-EXPORT_SYMBOL(slhc_free);
-EXPORT_SYMBOL(slhc_remember);
-EXPORT_SYMBOL(slhc_compress);
-EXPORT_SYMBOL(slhc_uncompress);
-EXPORT_SYMBOL(slhc_toss);
-
 #else /* CONFIG_INET */
 
-
 int
 slhc_toss(struct slcompress *comp)
 {
@@ -738,6 +728,10 @@ slhc_init(int rslots, int tslots)
   printk(KERN_DEBUG "Called IP function on non IP-system: slhc_init");
   return NULL;
 }
+
+#endif /* CONFIG_INET */
+
+/* VJ header compression */
 EXPORT_SYMBOL(slhc_init);
 EXPORT_SYMBOL(slhc_free);
 EXPORT_SYMBOL(slhc_remember);
@@ -745,5 +739,4 @@ EXPORT_SYMBOL(slhc_compress);
 EXPORT_SYMBOL(slhc_uncompress);
 EXPORT_SYMBOL(slhc_toss);
 
-#endif /* CONFIG_INET */
 MODULE_LICENSE("Dual BSD/GPL");
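
Relocating the EXPORT_SYMBOL() lines to after the #endif exports the slhc_* entry points exactly once, whichever branch was built (the real VJ implementation or the CONFIG_INET=n stubs). The pattern in miniature (CONFIG_FOO and my_func are illustrative only):

#include <linux/module.h>

#ifdef CONFIG_FOO
int my_func(void)
{
	return 1;	/* real implementation */
}
#else
int my_func(void)
{
	return 0;	/* stub for kernels built without CONFIG_FOO */
}
#endif

EXPORT_SYMBOL(my_func);		/* exported in either configuration */
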
index 852e917..30ccbb6 100644 (file)
@@ -9948,16 +9948,16 @@ static int tg3_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
            !((tp->tg3_flags & TG3_FLAG_WOL_CAP) && device_can_wakeup(dp)))
                return -EINVAL;
 
+       device_set_wakeup_enable(dp, wol->wolopts & WAKE_MAGIC);
+
        spin_lock_bh(&tp->lock);
-       if (wol->wolopts & WAKE_MAGIC) {
+       if (device_may_wakeup(dp))
                tp->tg3_flags |= TG3_FLAG_WOL_ENABLE;
-               device_set_wakeup_enable(dp, true);
-       } else {
+       else
                tp->tg3_flags &= ~TG3_FLAG_WOL_ENABLE;
-               device_set_wakeup_enable(dp, false);
-       }
        spin_unlock_bh(&tp->lock);
 
+
        return 0;
 }
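
The tg3 change makes the driver flag follow the device wakeup state instead of tracking it separately: ethtool's set_wol updates device_set_wakeup_enable() first, and the WoL flag is then derived from device_may_wakeup() under the lock. Schematically (my_priv and MY_FLAG_WOL are illustrative, and only magic-packet wake is handled):

#include <linux/device.h>
#include <linux/errno.h>
#include <linux/ethtool.h>
#include <linux/netdevice.h>
#include <linux/pci.h>
#include <linux/spinlock.h>

#define MY_FLAG_WOL	0x1

struct my_priv {
	struct pci_dev *pdev;
	spinlock_t lock;
	u32 flags;
};

static int my_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
{
	struct my_priv *tp = netdev_priv(dev);
	struct device *dp = &tp->pdev->dev;

	if (wol->wolopts & ~WAKE_MAGIC)
		return -EINVAL;

	device_set_wakeup_enable(dp, wol->wolopts & WAKE_MAGIC);

	spin_lock_bh(&tp->lock);
	if (device_may_wakeup(dp))
		tp->flags |= MY_FLAG_WOL;
	else
		tp->flags &= ~MY_FLAG_WOL;
	spin_unlock_bh(&tp->lock);

	return 0;
}
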
 
index 663b886..7930203 100644 (file)
@@ -1220,7 +1220,7 @@ void tms380tr_wait(unsigned long time)
                tmp = schedule_timeout_interruptible(tmp);
        } while(time_after(tmp, jiffies));
 #else
-       udelay(time);
+       mdelay(time / 1000);
 #endif
 }
 
index 1cc6713..5b83c3f 100644 (file)
        3XP Processor. It has been tested on x86 and sparc64.
 
        KNOWN ISSUES:
-       *) The current firmware always strips the VLAN tag off, even if
-               we tell it not to. You should filter VLANs at the switch
-               as a workaround (good practice in any event) until we can
-               get this fixed.
        *) Cannot DMA Rx packets to a 2 byte aligned address. Also firmware
                issue. Hopefully 3Com will fix it.
        *) Waiting for a command response takes 8ms due to non-preemptable
@@ -280,8 +276,6 @@ struct typhoon {
        struct pci_dev *        pdev;
        struct net_device *     dev;
        struct napi_struct      napi;
-       spinlock_t              state_lock;
-       struct vlan_group *     vlgrp;
        struct basic_ring       rxHiRing;
        struct basic_ring       rxBuffRing;
        struct rxbuff_ent       rxbuffers[RXENT_ENTRIES];
@@ -695,44 +689,6 @@ out:
        return err;
 }
 
-static void
-typhoon_vlan_rx_register(struct net_device *dev, struct vlan_group *grp)
-{
-       struct typhoon *tp = netdev_priv(dev);
-       struct cmd_desc xp_cmd;
-       int err;
-
-       spin_lock_bh(&tp->state_lock);
-       if(!tp->vlgrp != !grp) {
-               /* We've either been turned on for the first time, or we've
-                * been turned off. Update the 3XP.
-                */
-               if(grp)
-                       tp->offload |= TYPHOON_OFFLOAD_VLAN;
-               else
-                       tp->offload &= ~TYPHOON_OFFLOAD_VLAN;
-
-               /* If the interface is up, the runtime is running -- and we
-                * must be up for the vlan core to call us.
-                *
-                * Do the command outside of the spin lock, as it is slow.
-                */
-               INIT_COMMAND_WITH_RESPONSE(&xp_cmd,
-                                       TYPHOON_CMD_SET_OFFLOAD_TASKS);
-               xp_cmd.parm2 = tp->offload;
-               xp_cmd.parm3 = tp->offload;
-               spin_unlock_bh(&tp->state_lock);
-               err = typhoon_issue_command(tp, 1, &xp_cmd, 0, NULL);
-               if(err < 0)
-                       netdev_err(tp->dev, "vlan offload error %d\n", -err);
-               spin_lock_bh(&tp->state_lock);
-       }
-
-       /* now make the change visible */
-       tp->vlgrp = grp;
-       spin_unlock_bh(&tp->state_lock);
-}
-
 static inline void
 typhoon_tso_fill(struct sk_buff *skb, struct transmit_ring *txRing,
                        u32 ring_dma)
@@ -818,7 +774,7 @@ typhoon_start_tx(struct sk_buff *skb, struct net_device *dev)
                first_txd->processFlags |=
                    TYPHOON_TX_PF_INSERT_VLAN | TYPHOON_TX_PF_VLAN_PRIORITY;
                first_txd->processFlags |=
-                   cpu_to_le32(ntohs(vlan_tx_tag_get(skb)) <<
+                   cpu_to_le32(htons(vlan_tx_tag_get(skb)) <<
                                TYPHOON_TX_PF_VLAN_TAG_SHIFT);
        }
 
@@ -936,7 +892,7 @@ typhoon_set_rx_mode(struct net_device *dev)
                filter |= TYPHOON_RX_FILTER_MCAST_HASH;
        }
 
-       INIT_COMMAND_NO_RESPONSE(&xp_cmd, TYPHOON_CMD_SET_RX_FILTER);
+       INIT_COMMAND_WITH_RESPONSE(&xp_cmd, TYPHOON_CMD_SET_RX_FILTER);
        xp_cmd.parm1 = filter;
        typhoon_issue_command(tp, 1, &xp_cmd, 0, NULL);
 }
@@ -1198,6 +1154,20 @@ typhoon_get_rx_csum(struct net_device *dev)
        return 1;
 }
 
+static int
+typhoon_set_flags(struct net_device *dev, u32 data)
+{
+       /* There's no way to turn off the RX VLAN offloading and stripping
+        * on the current 3XP firmware -- it does not respect the offload
+        * settings -- so we only allow the user to toggle the TX processing.
+        */
+       if (!(data & ETH_FLAG_RXVLAN))
+               return -EINVAL;
+
+       return ethtool_op_set_flags(dev, data,
+                                   ETH_FLAG_RXVLAN | ETH_FLAG_TXVLAN);
+}
+
 static void
 typhoon_get_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
 {
@@ -1224,6 +1194,8 @@ static const struct ethtool_ops typhoon_ethtool_ops = {
        .set_sg                 = ethtool_op_set_sg,
        .set_tso                = ethtool_op_set_tso,
        .get_ringparam          = typhoon_get_ringparam,
+       .set_flags              = typhoon_set_flags,
+       .get_flags              = ethtool_op_get_flags,
 };
 
 static int
@@ -1309,9 +1281,9 @@ typhoon_init_interface(struct typhoon *tp)
 
        tp->offload = TYPHOON_OFFLOAD_IP_CHKSUM | TYPHOON_OFFLOAD_TCP_CHKSUM;
        tp->offload |= TYPHOON_OFFLOAD_UDP_CHKSUM | TSO_OFFLOAD_ON;
+       tp->offload |= TYPHOON_OFFLOAD_VLAN;
 
        spin_lock_init(&tp->command_lock);
-       spin_lock_init(&tp->state_lock);
 
        /* Force the writes to the shared memory area out before continuing. */
        wmb();
@@ -1328,7 +1300,7 @@ typhoon_init_rings(struct typhoon *tp)
        tp->rxHiRing.lastWrite = 0;
        tp->rxBuffRing.lastWrite = 0;
        tp->cmdRing.lastWrite = 0;
-       tp->cmdRing.lastWrite = 0;
+       tp->respRing.lastWrite = 0;
 
        tp->txLoRing.lastRead = 0;
        tp->txHiRing.lastRead = 0;
@@ -1762,13 +1734,10 @@ typhoon_rx(struct typhoon *tp, struct basic_ring *rxRing, volatile __le32 * read
                } else
                        skb_checksum_none_assert(new_skb);
 
-               spin_lock(&tp->state_lock);
-               if(tp->vlgrp != NULL && rx->rxStatus & TYPHOON_RX_VLAN)
-                       vlan_hwaccel_receive_skb(new_skb, tp->vlgrp,
-                                                ntohl(rx->vlanTag) & 0xffff);
-               else
-                       netif_receive_skb(new_skb);
-               spin_unlock(&tp->state_lock);
+               if (rx->rxStatus & TYPHOON_RX_VLAN)
+                       __vlan_hwaccel_put_tag(new_skb,
+                                              ntohl(rx->vlanTag) & 0xffff);
+               netif_receive_skb(new_skb);
 
                received++;
                budget--;
@@ -1989,11 +1958,9 @@ typhoon_start_runtime(struct typhoon *tp)
                goto error_out;
 
        INIT_COMMAND_NO_RESPONSE(&xp_cmd, TYPHOON_CMD_SET_OFFLOAD_TASKS);
-       spin_lock_bh(&tp->state_lock);
        xp_cmd.parm2 = tp->offload;
        xp_cmd.parm3 = tp->offload;
        err = typhoon_issue_command(tp, 1, &xp_cmd, 0, NULL);
-       spin_unlock_bh(&tp->state_lock);
        if(err < 0)
                goto error_out;
 
@@ -2231,13 +2198,9 @@ typhoon_suspend(struct pci_dev *pdev, pm_message_t state)
        if(!netif_running(dev))
                return 0;
 
-       spin_lock_bh(&tp->state_lock);
-       if(tp->vlgrp && tp->wol_events & TYPHOON_WAKE_MAGIC_PKT) {
-               spin_unlock_bh(&tp->state_lock);
-               netdev_err(dev, "cannot do WAKE_MAGIC with VLANS\n");
-               return -EBUSY;
-       }
-       spin_unlock_bh(&tp->state_lock);
+       /* TYPHOON_OFFLOAD_VLAN is always on now, so this doesn't work */
+       if(tp->wol_events & TYPHOON_WAKE_MAGIC_PKT)
+               netdev_warn(dev, "cannot do WAKE_MAGIC with VLAN offloading\n");
 
        netif_device_detach(dev);
 
@@ -2338,7 +2301,6 @@ static const struct net_device_ops typhoon_netdev_ops = {
        .ndo_validate_addr      = eth_validate_addr,
        .ndo_set_mac_address    = typhoon_set_mac_address,
        .ndo_change_mtu         = eth_change_mtu,
-       .ndo_vlan_rx_register   = typhoon_vlan_rx_register,
 };
 
 static int __devinit
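
With the private vlgrp pointer and state_lock gone, the typhoon RX path simply attaches the hardware-extracted tag to the skb and hands it to the stack; VLAN acceleration stays enabled in tp->offload, and typhoon_set_flags() only lets userspace toggle TX tagging because the firmware strips on receive regardless. The shape of that RX path, using the two-argument __vlan_hwaccel_put_tag() of this kernel generation (later kernels add a protocol argument):

#include <linux/if_vlan.h>
#include <linux/netdevice.h>

static void my_rx_one(struct sk_buff *skb, bool tagged, u16 tci)
{
	if (tagged)
		__vlan_hwaccel_put_tag(skb, tci);	/* record the VLAN tag in the skb */

	netif_receive_skb(skb);		/* the stack handles tagged and untagged alike */
}
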
index 37108fb..969c751 100644 (file)
@@ -88,9 +88,9 @@ struct UPT1_RSSConf {
 
 /* features */
 enum {
-       UPT1_F_RXCSUM           = 0x0001,   /* rx csum verification */
-       UPT1_F_RSS              = 0x0002,
-       UPT1_F_RXVLAN           = 0x0004,   /* VLAN tag stripping */
-       UPT1_F_LRO              = 0x0008,
+       UPT1_F_RXCSUM           = cpu_to_le64(0x0001),   /* rx csum verification */
+       UPT1_F_RSS              = cpu_to_le64(0x0002),
+       UPT1_F_RXVLAN           = cpu_to_le64(0x0004),   /* VLAN tag stripping */
+       UPT1_F_LRO              = cpu_to_le64(0x0008),
 };
 #endif
index ca7727b..4d84912 100644 (file)
@@ -523,9 +523,9 @@ struct Vmxnet3_RxFilterConf {
 #define VMXNET3_PM_MAX_PATTERN_SIZE   128
 #define VMXNET3_PM_MAX_MASK_SIZE      (VMXNET3_PM_MAX_PATTERN_SIZE / 8)
 
-#define VMXNET3_PM_WAKEUP_MAGIC       0x01  /* wake up on magic pkts */
-#define VMXNET3_PM_WAKEUP_FILTER      0x02  /* wake up on pkts matching
-                                            * filters */
+#define VMXNET3_PM_WAKEUP_MAGIC       cpu_to_le16(0x01)  /* wake up on magic pkts */
+#define VMXNET3_PM_WAKEUP_FILTER      cpu_to_le16(0x02)  /* wake up on pkts matching
+                                                         * filters */
 
 
 struct Vmxnet3_PM_PktFilter {
index 3f60e0e..e3658e1 100644 (file)
@@ -1563,8 +1563,7 @@ vmxnet3_vlan_rx_register(struct net_device *netdev, struct vlan_group *grp)
                        adapter->vlan_grp = grp;
 
                        /* update FEATURES to device */
-                       set_flag_le64(&devRead->misc.uptFeatures,
-                                     UPT1_F_RXVLAN);
+                       devRead->misc.uptFeatures |= UPT1_F_RXVLAN;
                        VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
                                               VMXNET3_CMD_UPDATE_FEATURE);
                        /*
@@ -1587,7 +1586,7 @@ vmxnet3_vlan_rx_register(struct net_device *netdev, struct vlan_group *grp)
                struct Vmxnet3_DSDevRead *devRead = &shared->devRead;
                adapter->vlan_grp = NULL;
 
-               if (le64_to_cpu(devRead->misc.uptFeatures) & UPT1_F_RXVLAN) {
+               if (devRead->misc.uptFeatures & UPT1_F_RXVLAN) {
                        int i;
 
                        for (i = 0; i < VMXNET3_VFT_SIZE; i++) {
@@ -1600,8 +1599,7 @@ vmxnet3_vlan_rx_register(struct net_device *netdev, struct vlan_group *grp)
                                               VMXNET3_CMD_UPDATE_VLAN_FILTERS);
 
                        /* update FEATURES to device */
-                       reset_flag_le64(&devRead->misc.uptFeatures,
-                                       UPT1_F_RXVLAN);
+                       devRead->misc.uptFeatures &= ~UPT1_F_RXVLAN;
                        VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
                                               VMXNET3_CMD_UPDATE_FEATURE);
                }
@@ -1762,15 +1760,15 @@ vmxnet3_setup_driver_shared(struct vmxnet3_adapter *adapter)
 
        /* set up feature flags */
        if (adapter->rxcsum)
-               set_flag_le64(&devRead->misc.uptFeatures, UPT1_F_RXCSUM);
+               devRead->misc.uptFeatures |= UPT1_F_RXCSUM;
 
        if (adapter->lro) {
-               set_flag_le64(&devRead->misc.uptFeatures, UPT1_F_LRO);
+               devRead->misc.uptFeatures |= UPT1_F_LRO;
                devRead->misc.maxNumRxSG = cpu_to_le16(1 + MAX_SKB_FRAGS);
        }
        if ((adapter->netdev->features & NETIF_F_HW_VLAN_RX) &&
            adapter->vlan_grp) {
-               set_flag_le64(&devRead->misc.uptFeatures, UPT1_F_RXVLAN);
+               devRead->misc.uptFeatures |= UPT1_F_RXVLAN;
        }
 
        devRead->misc.mtu = cpu_to_le32(adapter->netdev->mtu);
@@ -2577,7 +2575,7 @@ vmxnet3_suspend(struct device *device)
                memcpy(pmConf->filters[i].pattern, netdev->dev_addr, ETH_ALEN);
                pmConf->filters[i].mask[0] = 0x3F; /* LSB ETH_ALEN bits */
 
-               set_flag_le16(&pmConf->wakeUpEvents, VMXNET3_PM_WAKEUP_FILTER);
+               pmConf->wakeUpEvents |= VMXNET3_PM_WAKEUP_FILTER;
                i++;
        }
 
@@ -2619,13 +2617,13 @@ vmxnet3_suspend(struct device *device)
                pmConf->filters[i].mask[5] = 0x03; /* IPv4 TIP */
                in_dev_put(in_dev);
 
-               set_flag_le16(&pmConf->wakeUpEvents, VMXNET3_PM_WAKEUP_FILTER);
+               pmConf->wakeUpEvents |= VMXNET3_PM_WAKEUP_FILTER;
                i++;
        }
 
 skip_arp:
        if (adapter->wol & WAKE_MAGIC)
-               set_flag_le16(&pmConf->wakeUpEvents, VMXNET3_PM_WAKEUP_MAGIC);
+               pmConf->wakeUpEvents |= VMXNET3_PM_WAKEUP_MAGIC;
 
        pmConf->numFilters = i;
 
@@ -2667,7 +2665,7 @@ vmxnet3_resume(struct device *device)
        adapter->shared->devRead.pmConfDesc.confVer = cpu_to_le32(1);
        adapter->shared->devRead.pmConfDesc.confLen = cpu_to_le32(sizeof(
                                                                  *pmConf));
-       adapter->shared->devRead.pmConfDesc.confPA = cpu_to_le32(virt_to_phys(
+       adapter->shared->devRead.pmConfDesc.confPA = cpu_to_le64(virt_to_phys(
                                                                 pmConf));
 
        netif_device_attach(netdev);
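
These vmxnet3 hunks rely on the UPT1_F_* and VMXNET3_PM_WAKEUP_* constants now being defined in wire (little-endian) order: once the constants and the shared-memory fields carry the same __le64/__le16 type, plain |= and &= ~ are endian-correct, which is what makes the set_flag_le64()/reset_flag_le64() helpers removed further down unnecessary. Reduced example with illustrative names:

#include <asm/byteorder.h>
#include <linux/types.h>

#define MY_F_RXCSUM	cpu_to_le64(0x0001)
#define MY_F_LRO	cpu_to_le64(0x0008)

struct my_dev_shared {
	__le64 features;	/* read by the device, little-endian on the wire */
};

static void my_update_features(struct my_dev_shared *shared, bool lro)
{
	shared->features |= MY_F_RXCSUM;	/* both operands are __le64 */

	if (lro)
		shared->features |= MY_F_LRO;
	else
		shared->features &= ~MY_F_LRO;
}
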
index 7e4b5a8..b79070b 100644 (file)
@@ -50,13 +50,11 @@ vmxnet3_set_rx_csum(struct net_device *netdev, u32 val)
                adapter->rxcsum = val;
                if (netif_running(netdev)) {
                        if (val)
-                               set_flag_le64(
-                               &adapter->shared->devRead.misc.uptFeatures,
-                               UPT1_F_RXCSUM);
+                               adapter->shared->devRead.misc.uptFeatures |=
+                               UPT1_F_RXCSUM;
                        else
-                               reset_flag_le64(
-                               &adapter->shared->devRead.misc.uptFeatures,
-                               UPT1_F_RXCSUM);
+                               adapter->shared->devRead.misc.uptFeatures &=
+                               ~UPT1_F_RXCSUM;
 
                        VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
                                               VMXNET3_CMD_UPDATE_FEATURE);
@@ -292,10 +290,10 @@ vmxnet3_set_flags(struct net_device *netdev, u32 data)
                /* update harware LRO capability accordingly */
                if (lro_requested)
                        adapter->shared->devRead.misc.uptFeatures |=
-                                               cpu_to_le64(UPT1_F_LRO);
+                                                       UPT1_F_LRO;
                else
                        adapter->shared->devRead.misc.uptFeatures &=
-                                               cpu_to_le64(~UPT1_F_LRO);
+                                                       ~UPT1_F_LRO;
                VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
                                       VMXNET3_CMD_UPDATE_FEATURE);
        }
index c88ea5c..8a2f471 100644 (file)
@@ -301,8 +301,8 @@ struct vmxnet3_adapter {
        struct net_device              *netdev;
        struct pci_dev                 *pdev;
 
-       u8                              *hw_addr0; /* for BAR 0 */
-       u8                              *hw_addr1; /* for BAR 1 */
+       u8                      __iomem *hw_addr0; /* for BAR 0 */
+       u8                      __iomem *hw_addr1; /* for BAR 1 */
 
        /* feature control */
        bool                            rxcsum;
@@ -353,21 +353,6 @@ struct vmxnet3_adapter {
 #define VMXNET3_MAX_ETH_HDR_SIZE    22
 #define VMXNET3_MAX_SKB_BUF_SIZE    (3*1024)
 
-static inline void set_flag_le16(__le16 *data, u16 flag)
-{
-       *data = cpu_to_le16(le16_to_cpu(*data) | flag);
-}
-
-static inline void set_flag_le64(__le64 *data, u64 flag)
-{
-       *data = cpu_to_le64(le64_to_cpu(*data) | flag);
-}
-
-static inline void reset_flag_le64(__le64 *data, u64 flag)
-{
-       *data = cpu_to_le64(le64_to_cpu(*data) & ~flag);
-}
-
 int
 vmxnet3_quiesce_dev(struct vmxnet3_adapter *adapter);
 
index 0e6db59..906a3ca 100644 (file)
 #include "vxge-traffic.h"
 #include "vxge-config.h"
 
+static enum vxge_hw_status
+__vxge_hw_fifo_create(
+       struct __vxge_hw_vpath_handle *vpath_handle,
+       struct vxge_hw_fifo_attr *attr);
+
+static enum vxge_hw_status
+__vxge_hw_fifo_abort(
+       struct __vxge_hw_fifo *fifoh);
+
+static enum vxge_hw_status
+__vxge_hw_fifo_reset(
+       struct __vxge_hw_fifo *ringh);
+
+static enum vxge_hw_status
+__vxge_hw_fifo_delete(
+       struct __vxge_hw_vpath_handle *vpath_handle);
+
+static struct __vxge_hw_blockpool_entry *
+__vxge_hw_blockpool_block_allocate(struct __vxge_hw_device *hldev,
+                       u32 size);
+
+static void
+__vxge_hw_blockpool_block_free(struct __vxge_hw_device *hldev,
+                       struct __vxge_hw_blockpool_entry *entry);
+
+static void vxge_hw_blockpool_block_add(struct __vxge_hw_device *devh,
+                                       void *block_addr,
+                                       u32 length,
+                                       struct pci_dev *dma_h,
+                                       struct pci_dev *acc_handle);
+
+static enum vxge_hw_status
+__vxge_hw_blockpool_create(struct __vxge_hw_device *hldev,
+                       struct __vxge_hw_blockpool  *blockpool,
+                       u32 pool_size,
+                       u32 pool_max);
+
+static void
+__vxge_hw_blockpool_destroy(struct __vxge_hw_blockpool  *blockpool);
+
+static void *
+__vxge_hw_blockpool_malloc(struct __vxge_hw_device *hldev,
+                       u32 size,
+                       struct vxge_hw_mempool_dma *dma_object);
+
+static void
+__vxge_hw_blockpool_free(struct __vxge_hw_device *hldev,
+                       void *memblock,
+                       u32 size,
+                       struct vxge_hw_mempool_dma *dma_object);
+
+
+static struct __vxge_hw_channel*
+__vxge_hw_channel_allocate(struct __vxge_hw_vpath_handle *vph,
+                       enum __vxge_hw_channel_type type, u32 length,
+                       u32 per_dtr_space, void *userdata);
+
+static void
+__vxge_hw_channel_free(
+       struct __vxge_hw_channel *channel);
+
+static enum vxge_hw_status
+__vxge_hw_channel_initialize(
+       struct __vxge_hw_channel *channel);
+
+static enum vxge_hw_status
+__vxge_hw_channel_reset(
+       struct __vxge_hw_channel *channel);
+
+static enum vxge_hw_status __vxge_hw_ring_delete(struct __vxge_hw_vpath_handle *vp);
+
+static enum vxge_hw_status
+__vxge_hw_device_fifo_config_check(struct vxge_hw_fifo_config *fifo_config);
+
+static enum vxge_hw_status
+__vxge_hw_device_config_check(struct vxge_hw_device_config *new_config);
+
+static void
+__vxge_hw_device_id_get(struct __vxge_hw_device *hldev);
+
+static void
+__vxge_hw_device_host_info_get(struct __vxge_hw_device *hldev);
+
+static enum vxge_hw_status
+__vxge_hw_vpath_card_info_get(
+       u32 vp_id,
+       struct vxge_hw_vpath_reg __iomem *vpath_reg,
+       struct vxge_hw_device_hw_info *hw_info);
+
+static enum vxge_hw_status
+__vxge_hw_device_initialize(struct __vxge_hw_device *hldev);
+
+static void
+__vxge_hw_device_pci_e_init(struct __vxge_hw_device *hldev);
+
+static enum vxge_hw_status
+__vxge_hw_device_reg_addr_get(struct __vxge_hw_device *hldev);
+
+static enum vxge_hw_status
+__vxge_hw_device_register_poll(
+       void __iomem    *reg,
+       u64 mask, u32 max_millis);
+
+static inline enum vxge_hw_status
+__vxge_hw_pio_mem_write64(u64 val64, void __iomem *addr,
+                         u64 mask, u32 max_millis)
+{
+       __vxge_hw_pio_mem_write32_lower((u32)vxge_bVALn(val64, 32, 32), addr);
+       wmb();
+
+       __vxge_hw_pio_mem_write32_upper((u32)vxge_bVALn(val64, 0, 32), addr);
+       wmb();
+
+       return  __vxge_hw_device_register_poll(addr, mask, max_millis);
+}
+
+static struct vxge_hw_mempool*
+__vxge_hw_mempool_create(struct __vxge_hw_device *devh, u32 memblock_size,
+                        u32 item_size, u32 private_size, u32 items_initial,
+                        u32 items_max, struct vxge_hw_mempool_cbs *mp_callback,
+                        void *userdata);
+static void __vxge_hw_mempool_destroy(struct vxge_hw_mempool *mempool);
+
+static enum vxge_hw_status
+__vxge_hw_vpath_stats_get(struct __vxge_hw_virtualpath *vpath,
+                         struct vxge_hw_vpath_stats_hw_info *hw_stats);
+
+static enum vxge_hw_status
+vxge_hw_vpath_stats_enable(struct __vxge_hw_vpath_handle *vpath_handle);
+
+static enum vxge_hw_status
+__vxge_hw_legacy_swapper_set(struct vxge_hw_legacy_reg __iomem *legacy_reg);
+
+static u64
+__vxge_hw_vpath_pci_func_mode_get(u32  vp_id,
+                                 struct vxge_hw_vpath_reg __iomem *vpath_reg);
+
+static u32
+__vxge_hw_vpath_func_id_get(u32 vp_id, struct vxge_hw_vpmgmt_reg __iomem *vpmgmt_reg);
+
+static enum vxge_hw_status
+__vxge_hw_vpath_addr_get(u32 vp_id, struct vxge_hw_vpath_reg __iomem *vpath_reg,
+                        u8 (macaddr)[ETH_ALEN], u8 (macaddr_mask)[ETH_ALEN]);
+
+static enum vxge_hw_status
+__vxge_hw_vpath_reset_check(struct __vxge_hw_virtualpath *vpath);
+
+
+static enum vxge_hw_status
+__vxge_hw_vpath_sw_reset(struct __vxge_hw_device *devh, u32 vp_id);
+
+static enum vxge_hw_status
+__vxge_hw_vpath_fw_ver_get(u32 vp_id, struct vxge_hw_vpath_reg __iomem *vpath_reg,
+                          struct vxge_hw_device_hw_info *hw_info);
+
+static enum vxge_hw_status
+__vxge_hw_vpath_mac_configure(struct __vxge_hw_device *devh, u32 vp_id);
+
+static void
+__vxge_hw_vp_terminate(struct __vxge_hw_device *devh, u32 vp_id);
+
+static enum vxge_hw_status
+__vxge_hw_vpath_stats_access(struct __vxge_hw_virtualpath *vpath,
+                            u32 operation, u32 offset, u64 *stat);
+
+static enum vxge_hw_status
+__vxge_hw_vpath_xmac_tx_stats_get(struct __vxge_hw_virtualpath *vpath,
+                                 struct vxge_hw_xmac_vpath_tx_stats *vpath_tx_stats);
+
+static enum vxge_hw_status
+__vxge_hw_vpath_xmac_rx_stats_get(struct __vxge_hw_virtualpath *vpath,
+                                 struct vxge_hw_xmac_vpath_rx_stats *vpath_rx_stats);
+
 /*
  * __vxge_hw_channel_allocate - Allocate memory for channel
  * This function allocates required memory for the channel and various arrays
@@ -190,7 +363,7 @@ __vxge_hw_device_pci_e_init(struct __vxge_hw_device *hldev)
  * Will poll certain register for specified amount of time.
  * Will poll until masked bit is not cleared.
  */
-enum vxge_hw_status
+static enum vxge_hw_status
 __vxge_hw_device_register_poll(void __iomem *reg, u64 mask, u32 max_millis)
 {
        u64 val64;
@@ -221,7 +394,7 @@ __vxge_hw_device_register_poll(void __iomem *reg, u64 mask, u32 max_millis)
  * in progress
  * This routine checks the vpath reset in progress register is turned zero
  */
-enum vxge_hw_status
+static enum vxge_hw_status
 __vxge_hw_device_vpath_reset_in_prog_check(u64 __iomem *vpath_rst_in_prog)
 {
        enum vxge_hw_status status;
@@ -236,7 +409,7 @@ __vxge_hw_device_vpath_reset_in_prog_check(u64 __iomem *vpath_rst_in_prog)
  * This routine sets the swapper and reads the toc pointer and returns the
  * memory mapped address of the toc
  */
-struct vxge_hw_toc_reg __iomem *
+static struct vxge_hw_toc_reg __iomem *
 __vxge_hw_device_toc_get(void __iomem *bar0)
 {
        u64 val64;
@@ -779,7 +952,7 @@ exit:
  * vxge_hw_device_xmac_aggr_stats_get - Get the Statistics on aggregate port
  * Get the Statistics on aggregate port
  */
-enum vxge_hw_status
+static enum vxge_hw_status
 vxge_hw_device_xmac_aggr_stats_get(struct __vxge_hw_device *hldev, u32 port,
                                   struct vxge_hw_xmac_aggr_stats *aggr_stats)
 {
@@ -814,7 +987,7 @@ exit:
  * vxge_hw_device_xmac_port_stats_get - Get the Statistics on a port
  * Get the Statistics on port
  */
-enum vxge_hw_status
+static enum vxge_hw_status
 vxge_hw_device_xmac_port_stats_get(struct __vxge_hw_device *hldev, u32 port,
                                   struct vxge_hw_xmac_port_stats *port_stats)
 {
@@ -952,20 +1125,6 @@ u32 vxge_hw_device_trace_level_get(struct __vxge_hw_device *hldev)
        return 0;
 #endif
 }
-/*
- * vxge_hw_device_debug_mask_get - Get the debug mask
- * This routine returns the current debug mask set
- */
-u32 vxge_hw_device_debug_mask_get(struct __vxge_hw_device *hldev)
-{
-#if defined(VXGE_DEBUG_TRACE_MASK) || defined(VXGE_DEBUG_ERR_MASK)
-       if (hldev == NULL)
-               return 0;
-       return hldev->debug_module_mask;
-#else
-       return 0;
-#endif
-}
 
 /*
  * vxge_hw_getpause_data -Pause frame frame generation and reception.
@@ -1090,7 +1249,7 @@ __vxge_hw_ring_block_next_pointer_set(u8 *block, dma_addr_t dma_next)
  *             first block
  * Returns the dma address of the first RxD block
  */
-u64 __vxge_hw_ring_first_block_address_get(struct __vxge_hw_ring *ring)
+static u64 __vxge_hw_ring_first_block_address_get(struct __vxge_hw_ring *ring)
 {
        struct vxge_hw_mempool_dma *dma_object;
 
@@ -1252,7 +1411,7 @@ exit:
  * This function creates Ring and initializes it.
  *
  */
-enum vxge_hw_status
+static enum vxge_hw_status
 __vxge_hw_ring_create(struct __vxge_hw_vpath_handle *vp,
                      struct vxge_hw_ring_attr *attr)
 {
@@ -1363,7 +1522,7 @@ exit:
  * __vxge_hw_ring_abort - Returns the RxD
  * This function terminates the RxDs of ring
  */
-enum vxge_hw_status __vxge_hw_ring_abort(struct __vxge_hw_ring *ring)
+static enum vxge_hw_status __vxge_hw_ring_abort(struct __vxge_hw_ring *ring)
 {
        void *rxdh;
        struct __vxge_hw_channel *channel;
@@ -1392,7 +1551,7 @@ enum vxge_hw_status __vxge_hw_ring_abort(struct __vxge_hw_ring *ring)
  * __vxge_hw_ring_reset - Resets the ring
  * This function resets the ring during vpath reset operation
  */
-enum vxge_hw_status __vxge_hw_ring_reset(struct __vxge_hw_ring *ring)
+static enum vxge_hw_status __vxge_hw_ring_reset(struct __vxge_hw_ring *ring)
 {
        enum vxge_hw_status status = VXGE_HW_OK;
        struct __vxge_hw_channel *channel;
@@ -1419,7 +1578,7 @@ exit:
  * __vxge_hw_ring_delete - Removes the ring
  * This function freeup the memory pool and removes the ring
  */
-enum vxge_hw_status __vxge_hw_ring_delete(struct __vxge_hw_vpath_handle *vp)
+static enum vxge_hw_status __vxge_hw_ring_delete(struct __vxge_hw_vpath_handle *vp)
 {
        struct __vxge_hw_ring *ring = vp->vpath->ringh;
 
@@ -1438,7 +1597,7 @@ enum vxge_hw_status __vxge_hw_ring_delete(struct __vxge_hw_vpath_handle *vp)
  * __vxge_hw_mempool_grow
  * Will resize mempool up to %num_allocate value.
  */
-enum vxge_hw_status
+static enum vxge_hw_status
 __vxge_hw_mempool_grow(struct vxge_hw_mempool *mempool, u32 num_allocate,
                       u32 *num_allocated)
 {
@@ -1527,7 +1686,7 @@ exit:
  * with size enough to hold %items_initial number of items. Memory is
  * DMA-able but client must map/unmap before interoperating with the device.
  */
-struct vxge_hw_mempool*
+static struct vxge_hw_mempool*
 __vxge_hw_mempool_create(
        struct __vxge_hw_device *devh,
        u32 memblock_size,
@@ -1644,7 +1803,7 @@ exit:
 /*
  * vxge_hw_mempool_destroy
  */
-void __vxge_hw_mempool_destroy(struct vxge_hw_mempool *mempool)
+static void __vxge_hw_mempool_destroy(struct vxge_hw_mempool *mempool)
 {
        u32 i, j;
        struct __vxge_hw_device *devh = mempool->devh;
@@ -1700,7 +1859,7 @@ __vxge_hw_device_fifo_config_check(struct vxge_hw_fifo_config *fifo_config)
  * __vxge_hw_device_vpath_config_check - Check vpath configuration.
  * Check the vpath configuration
  */
-enum vxge_hw_status
+static enum vxge_hw_status
 __vxge_hw_device_vpath_config_check(struct vxge_hw_vp_config *vp_config)
 {
        enum vxge_hw_status status;
@@ -1922,7 +2081,7 @@ vxge_hw_device_config_default_get(struct vxge_hw_device_config *device_config)
  * _hw_legacy_swapper_set - Set the swapper bits for the legacy secion.
  * Set the swapper bits appropriately for the lagacy section.
  */
-enum vxge_hw_status
+static enum vxge_hw_status
 __vxge_hw_legacy_swapper_set(struct vxge_hw_legacy_reg __iomem *legacy_reg)
 {
        u64 val64;
@@ -1977,7 +2136,7 @@ __vxge_hw_legacy_swapper_set(struct vxge_hw_legacy_reg __iomem *legacy_reg)
  * __vxge_hw_vpath_swapper_set - Set the swapper bits for the vpath.
  * Set the swapper bits appropriately for the vpath.
  */
-enum vxge_hw_status
+static enum vxge_hw_status
 __vxge_hw_vpath_swapper_set(struct vxge_hw_vpath_reg __iomem *vpath_reg)
 {
 #ifndef __BIG_ENDIAN
@@ -1996,7 +2155,7 @@ __vxge_hw_vpath_swapper_set(struct vxge_hw_vpath_reg __iomem *vpath_reg)
  * __vxge_hw_kdfc_swapper_set - Set the swapper bits for the kdfc.
  * Set the swapper bits appropriately for the vpath.
  */
-enum vxge_hw_status
+static enum vxge_hw_status
 __vxge_hw_kdfc_swapper_set(
        struct vxge_hw_legacy_reg __iomem *legacy_reg,
        struct vxge_hw_vpath_reg __iomem *vpath_reg)
@@ -2020,28 +2179,6 @@ __vxge_hw_kdfc_swapper_set(
        return VXGE_HW_OK;
 }
 
-/*
- * vxge_hw_mgmt_device_config - Retrieve device configuration.
- * Get device configuration. Permits to retrieve at run-time configuration
- * values that were used to initialize and configure the device.
- */
-enum vxge_hw_status
-vxge_hw_mgmt_device_config(struct __vxge_hw_device *hldev,
-                          struct vxge_hw_device_config *dev_config, int size)
-{
-
-       if ((hldev == NULL) || (hldev->magic != VXGE_HW_DEVICE_MAGIC))
-               return VXGE_HW_ERR_INVALID_DEVICE;
-
-       if (size != sizeof(struct vxge_hw_device_config))
-               return VXGE_HW_ERR_VERSION_CONFLICT;
-
-       memcpy(dev_config, &hldev->config,
-               sizeof(struct vxge_hw_device_config));
-
-       return VXGE_HW_OK;
-}
-
 /*
  * vxge_hw_mgmt_reg_read - Read Titan register.
  */
@@ -2438,7 +2575,7 @@ exit:
  * __vxge_hw_fifo_abort - Returns the TxD
  * This function terminates the TxDs of fifo
  */
-enum vxge_hw_status __vxge_hw_fifo_abort(struct __vxge_hw_fifo *fifo)
+static enum vxge_hw_status __vxge_hw_fifo_abort(struct __vxge_hw_fifo *fifo)
 {
        void *txdlh;
 
@@ -2466,7 +2603,7 @@ enum vxge_hw_status __vxge_hw_fifo_abort(struct __vxge_hw_fifo *fifo)
  * __vxge_hw_fifo_reset - Resets the fifo
  * This function resets the fifo during vpath reset operation
  */
-enum vxge_hw_status __vxge_hw_fifo_reset(struct __vxge_hw_fifo *fifo)
+static enum vxge_hw_status __vxge_hw_fifo_reset(struct __vxge_hw_fifo *fifo)
 {
        enum vxge_hw_status status = VXGE_HW_OK;
 
@@ -2501,7 +2638,7 @@ enum vxge_hw_status __vxge_hw_fifo_delete(struct __vxge_hw_vpath_handle *vp)
  *                          in pci config space.
  * Read from the vpath pci config space.
  */
-enum vxge_hw_status
+static enum vxge_hw_status
 __vxge_hw_vpath_pci_read(struct __vxge_hw_virtualpath *vpath,
                         u32 phy_func_0, u32 offset, u32 *val)
 {
@@ -2542,7 +2679,7 @@ exit:
  * __vxge_hw_vpath_func_id_get - Get the function id of the vpath.
  * Returns the function number of the vpath.
  */
-u32
+static u32
 __vxge_hw_vpath_func_id_get(u32 vp_id,
        struct vxge_hw_vpmgmt_reg __iomem *vpmgmt_reg)
 {
@@ -2573,7 +2710,7 @@ __vxge_hw_read_rts_ds(struct vxge_hw_vpath_reg __iomem *vpath_reg,
  * __vxge_hw_vpath_card_info_get - Get the serial numbers,
  * part number and product description.
  */
-enum vxge_hw_status
+static enum vxge_hw_status
 __vxge_hw_vpath_card_info_get(
        u32 vp_id,
        struct vxge_hw_vpath_reg __iomem *vpath_reg,
@@ -2695,7 +2832,7 @@ __vxge_hw_vpath_card_info_get(
  * __vxge_hw_vpath_fw_ver_get - Get the fw version
  * Returns FW Version
  */
-enum vxge_hw_status
+static enum vxge_hw_status
 __vxge_hw_vpath_fw_ver_get(
        u32 vp_id,
        struct vxge_hw_vpath_reg __iomem *vpath_reg,
@@ -2789,7 +2926,7 @@ exit:
  * __vxge_hw_vpath_pci_func_mode_get - Get the pci mode
  * Returns pci function mode
  */
-u64
+static u64
 __vxge_hw_vpath_pci_func_mode_get(
        u32  vp_id,
        struct vxge_hw_vpath_reg __iomem *vpath_reg)
@@ -2995,7 +3132,7 @@ exit:
  * __vxge_hw_vpath_addr_get - Get the hw address entry for this vpath
  *               from MAC address table.
  */
-enum vxge_hw_status
+static enum vxge_hw_status
 __vxge_hw_vpath_addr_get(
        u32 vp_id, struct vxge_hw_vpath_reg __iomem *vpath_reg,
        u8 (macaddr)[ETH_ALEN], u8 (macaddr_mask)[ETH_ALEN])
@@ -3347,7 +3484,7 @@ __vxge_hw_vpath_mgmt_read(
  * This routine checks the vpath_rst_in_prog register to see if
  * adapter completed the reset process for the vpath
  */
-enum vxge_hw_status
+static enum vxge_hw_status
 __vxge_hw_vpath_reset_check(struct __vxge_hw_virtualpath *vpath)
 {
        enum vxge_hw_status status;
@@ -3365,7 +3502,7 @@ __vxge_hw_vpath_reset_check(struct __vxge_hw_virtualpath *vpath)
  * __vxge_hw_vpath_reset
  * This routine resets the vpath on the device
  */
-enum vxge_hw_status
+static enum vxge_hw_status
 __vxge_hw_vpath_reset(struct __vxge_hw_device *hldev, u32 vp_id)
 {
        u64 val64;
@@ -3383,7 +3520,7 @@ __vxge_hw_vpath_reset(struct __vxge_hw_device *hldev, u32 vp_id)
  * __vxge_hw_vpath_sw_reset
  * This routine resets the vpath structures
  */
-enum vxge_hw_status
+static enum vxge_hw_status
 __vxge_hw_vpath_sw_reset(struct __vxge_hw_device *hldev, u32 vp_id)
 {
        enum vxge_hw_status status = VXGE_HW_OK;
@@ -3408,7 +3545,7 @@ exit:
  * This routine configures the prc registers of virtual path using the config
  * passed
  */
-void
+static void
 __vxge_hw_vpath_prc_configure(struct __vxge_hw_device *hldev, u32 vp_id)
 {
        u64 val64;
@@ -3480,7 +3617,7 @@ __vxge_hw_vpath_prc_configure(struct __vxge_hw_device *hldev, u32 vp_id)
  * This routine configures the kdfc registers of virtual path using the
  * config passed
  */
-enum vxge_hw_status
+static enum vxge_hw_status
 __vxge_hw_vpath_kdfc_configure(struct __vxge_hw_device *hldev, u32 vp_id)
 {
        u64 val64;
@@ -3553,7 +3690,7 @@ exit:
  * __vxge_hw_vpath_mac_configure
  * This routine configures the mac of virtual path using the config passed
  */
-enum vxge_hw_status
+static enum vxge_hw_status
 __vxge_hw_vpath_mac_configure(struct __vxge_hw_device *hldev, u32 vp_id)
 {
        u64 val64;
@@ -3621,7 +3758,7 @@ __vxge_hw_vpath_mac_configure(struct __vxge_hw_device *hldev, u32 vp_id)
  * This routine configures the tim registers of virtual path using the config
  * passed
  */
-enum vxge_hw_status
+static enum vxge_hw_status
 __vxge_hw_vpath_tim_configure(struct __vxge_hw_device *hldev, u32 vp_id)
 {
        u64 val64;
@@ -3897,7 +4034,7 @@ vxge_hw_vpath_tti_ci_set(struct __vxge_hw_device *hldev, u32 vp_id)
  * This routine is the final phase of init which initializes the
  * registers of the vpath using the configuration passed.
  */
-enum vxge_hw_status
+static enum vxge_hw_status
 __vxge_hw_vpath_initialize(struct __vxge_hw_device *hldev, u32 vp_id)
 {
        u64 val64;
@@ -3966,7 +4103,7 @@ exit:
  * This routine is the initial phase of init which resets the vpath and
  * initializes the software support structures.
  */
-enum vxge_hw_status
+static enum vxge_hw_status
 __vxge_hw_vp_initialize(struct __vxge_hw_device *hldev, u32 vp_id,
                        struct vxge_hw_vp_config *config)
 {
@@ -4022,7 +4159,7 @@ exit:
  * __vxge_hw_vp_terminate - Terminate Virtual Path structure
  * This routine closes all channels it opened and frees up memory
  */
-void
+static void
 __vxge_hw_vp_terminate(struct __vxge_hw_device *hldev, u32 vp_id)
 {
        struct __vxge_hw_virtualpath *vpath;
@@ -4384,7 +4521,7 @@ vxge_hw_vpath_enable(struct __vxge_hw_vpath_handle *vp)
  * Enable the DMA vpath statistics. The function is to be called to re-enable
  * the adapter to update stats into the host memory
  */
-enum vxge_hw_status
+static enum vxge_hw_status
 vxge_hw_vpath_stats_enable(struct __vxge_hw_vpath_handle *vp)
 {
        enum vxge_hw_status status = VXGE_HW_OK;
@@ -4409,7 +4546,7 @@ exit:
  * __vxge_hw_vpath_stats_access - Get the statistics from the given location
  *                           and offset and perform an operation
  */
-enum vxge_hw_status
+static enum vxge_hw_status
 __vxge_hw_vpath_stats_access(struct __vxge_hw_virtualpath *vpath,
                             u32 operation, u32 offset, u64 *stat)
 {
@@ -4445,7 +4582,7 @@ vpath_stats_access_exit:
 /*
  * __vxge_hw_vpath_xmac_tx_stats_get - Get the TX Statistics of a vpath
  */
-enum vxge_hw_status
+static enum vxge_hw_status
 __vxge_hw_vpath_xmac_tx_stats_get(
        struct __vxge_hw_virtualpath *vpath,
        struct vxge_hw_xmac_vpath_tx_stats *vpath_tx_stats)
@@ -4478,9 +4615,9 @@ exit:
 /*
  * __vxge_hw_vpath_xmac_rx_stats_get - Get the RX Statistics of a vpath
  */
-enum vxge_hw_status
+static enum vxge_hw_status
 __vxge_hw_vpath_xmac_rx_stats_get(struct __vxge_hw_virtualpath *vpath,
-                       struct vxge_hw_xmac_vpath_rx_stats *vpath_rx_stats)
+                                 struct vxge_hw_xmac_vpath_rx_stats *vpath_rx_stats)
 {
        u64 *val64;
        enum vxge_hw_status status = VXGE_HW_OK;
@@ -4509,9 +4646,9 @@ exit:
 /*
  * __vxge_hw_vpath_stats_get - Get the vpath hw statistics.
  */
-enum vxge_hw_status __vxge_hw_vpath_stats_get(
-                       struct __vxge_hw_virtualpath *vpath,
-                       struct vxge_hw_vpath_stats_hw_info *hw_stats)
+static enum vxge_hw_status
+__vxge_hw_vpath_stats_get(struct __vxge_hw_virtualpath *vpath,
+                         struct vxge_hw_vpath_stats_hw_info *hw_stats)
 {
        u64 val64;
        enum vxge_hw_status status = VXGE_HW_OK;
@@ -4643,6 +4780,32 @@ exit:
        return status;
 }
 
+
+static void vxge_os_dma_malloc_async(struct pci_dev *pdev, void *devh,
+                                       unsigned long size)
+{
+       gfp_t flags;
+       void *vaddr;
+
+       if (in_interrupt())
+               flags = GFP_ATOMIC | GFP_DMA;
+       else
+               flags = GFP_KERNEL | GFP_DMA;
+
+       vaddr = kmalloc((size), flags);
+
+       vxge_hw_blockpool_block_add(devh, vaddr, size, pdev, pdev);
+}
+
+static void vxge_os_dma_free(struct pci_dev *pdev, const void *vaddr,
+                            struct pci_dev **p_dma_acch)
+{
+       unsigned long misaligned = *(unsigned long *)p_dma_acch;
+       u8 *tmp = (u8 *)vaddr;
+       tmp -= misaligned;
+       kfree((void *)tmp);
+}
+
 /*
  * __vxge_hw_blockpool_create - Create block pool
  */
@@ -4845,12 +5008,11 @@ void __vxge_hw_blockpool_blocks_remove(struct __vxge_hw_blockpool *blockpool)
  * vxge_hw_blockpool_block_add - callback for vxge_os_dma_malloc_async
  * Adds a block to block pool
  */
-void vxge_hw_blockpool_block_add(
-                       struct __vxge_hw_device *devh,
-                       void *block_addr,
-                       u32 length,
-                       struct pci_dev *dma_h,
-                       struct pci_dev *acc_handle)
+static void vxge_hw_blockpool_block_add(struct __vxge_hw_device *devh,
+                                       void *block_addr,
+                                       u32 length,
+                                       struct pci_dev *dma_h,
+                                       struct pci_dev *acc_handle)
 {
        struct __vxge_hw_blockpool  *blockpool;
        struct __vxge_hw_blockpool_entry  *entry = NULL;
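
The vxge-config.c hunks above are a visibility cleanup rather than a behavioural change: helpers that are only called from within this file are given static linkage, and the small vxge_os_dma_malloc_async()/vxge_os_dma_free() wrappers are moved in from the header so that vxge_hw_blockpool_block_add() no longer needs an extern declaration. A minimal sketch of the pattern, with invented names (not the driver code):

    /* sketch: a file-local helper made static; a forward declaration lets
     * callers appear above the definition, and the shared header keeps no
     * prototype for it */
    static int helper(int x);               /* forward declaration */

    int public_entry(int x)                 /* the only symbol left visible */
    {
            return helper(x) + 1;
    }

    static int helper(int x)
    {
            return x * 2;                   /* private to this translation unit */
    }
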
index 1a94343..5c00861 100644 (file)
@@ -183,11 +183,6 @@ struct vxge_hw_device_version {
        char    version[VXGE_HW_FW_STRLEN];
 };
 
-u64
-__vxge_hw_vpath_pci_func_mode_get(
-       u32 vp_id,
-       struct vxge_hw_vpath_reg __iomem *vpath_reg);
-
 /**
  * struct vxge_hw_fifo_config - Configuration of fifo.
  * @enable: Is this fifo to be commissioned
@@ -1426,9 +1421,6 @@ struct vxge_hw_rth_hash_types {
        u8 hash_type_ipv6ex_en;
 };
 
-u32
-vxge_hw_device_debug_mask_get(struct __vxge_hw_device *devh);
-
 void vxge_hw_device_debug_set(
        struct __vxge_hw_device *devh,
        enum vxge_debug_level level,
@@ -1440,9 +1432,6 @@ vxge_hw_device_error_level_get(struct __vxge_hw_device *devh);
 u32
 vxge_hw_device_trace_level_get(struct __vxge_hw_device *devh);
 
-u32
-vxge_hw_device_debug_mask_get(struct __vxge_hw_device *devh);
-
 /**
  * vxge_hw_ring_rxd_size_get   - Get the size of ring descriptor.
  * @buf_mode: Buffer mode (1, 3 or 5)
@@ -1817,60 +1806,10 @@ struct vxge_hw_vpath_attr {
        struct vxge_hw_fifo_attr        fifo_attr;
 };
 
-enum vxge_hw_status
-__vxge_hw_blockpool_create(struct __vxge_hw_device *hldev,
-                       struct __vxge_hw_blockpool  *blockpool,
-                       u32 pool_size,
-                       u32 pool_max);
-
-void
-__vxge_hw_blockpool_destroy(struct __vxge_hw_blockpool  *blockpool);
-
-struct __vxge_hw_blockpool_entry *
-__vxge_hw_blockpool_block_allocate(struct __vxge_hw_device *hldev,
-                       u32 size);
-
-void
-__vxge_hw_blockpool_block_free(struct __vxge_hw_device *hldev,
-                       struct __vxge_hw_blockpool_entry *entry);
-
-void *
-__vxge_hw_blockpool_malloc(struct __vxge_hw_device *hldev,
-                       u32 size,
-                       struct vxge_hw_mempool_dma *dma_object);
-
-void
-__vxge_hw_blockpool_free(struct __vxge_hw_device *hldev,
-                       void *memblock,
-                       u32 size,
-                       struct vxge_hw_mempool_dma *dma_object);
-
-enum vxge_hw_status
-__vxge_hw_device_fifo_config_check(struct vxge_hw_fifo_config *fifo_config);
-
-enum vxge_hw_status
-__vxge_hw_device_config_check(struct vxge_hw_device_config *new_config);
-
-enum vxge_hw_status
-vxge_hw_mgmt_device_config(struct __vxge_hw_device *devh,
-               struct vxge_hw_device_config    *dev_config, int size);
-
 enum vxge_hw_status __devinit vxge_hw_device_hw_info_get(
        void __iomem *bar0,
        struct vxge_hw_device_hw_info *hw_info);
 
-enum vxge_hw_status
-__vxge_hw_vpath_fw_ver_get(
-       u32     vp_id,
-       struct vxge_hw_vpath_reg __iomem *vpath_reg,
-       struct vxge_hw_device_hw_info *hw_info);
-
-enum vxge_hw_status
-__vxge_hw_vpath_card_info_get(
-       u32 vp_id,
-       struct vxge_hw_vpath_reg __iomem *vpath_reg,
-       struct vxge_hw_device_hw_info *hw_info);
-
 enum vxge_hw_status __devinit vxge_hw_device_config_default_get(
        struct vxge_hw_device_config *device_config);
 
@@ -1954,38 +1893,6 @@ out:
        return vaddr;
 }
 
-extern void vxge_hw_blockpool_block_add(
-                       struct __vxge_hw_device *devh,
-                       void *block_addr,
-                       u32 length,
-                       struct pci_dev *dma_h,
-                       struct pci_dev *acc_handle);
-
-static inline void vxge_os_dma_malloc_async(struct pci_dev *pdev, void *devh,
-                                       unsigned long size)
-{
-       gfp_t flags;
-       void *vaddr;
-
-       if (in_interrupt())
-               flags = GFP_ATOMIC | GFP_DMA;
-       else
-               flags = GFP_KERNEL | GFP_DMA;
-
-       vaddr = kmalloc((size), flags);
-
-       vxge_hw_blockpool_block_add(devh, vaddr, size, pdev, pdev);
-}
-
-static inline void vxge_os_dma_free(struct pci_dev *pdev, const void *vaddr,
-                       struct pci_dev **p_dma_acch)
-{
-       unsigned long misaligned = *(unsigned long *)p_dma_acch;
-       u8 *tmp = (u8 *)vaddr;
-       tmp -= misaligned;
-       kfree((void *)tmp);
-}
-
 /*
  * __vxge_hw_mempool_item_priv - will return pointer on per item private space
  */
@@ -2010,40 +1917,6 @@ __vxge_hw_mempool_item_priv(
                            (*memblock_item_idx) * mempool->items_priv_size;
 }
 
-enum vxge_hw_status
-__vxge_hw_mempool_grow(
-       struct vxge_hw_mempool *mempool,
-       u32 num_allocate,
-       u32 *num_allocated);
-
-struct vxge_hw_mempool*
-__vxge_hw_mempool_create(
-       struct __vxge_hw_device *devh,
-       u32 memblock_size,
-       u32 item_size,
-       u32 private_size,
-       u32 items_initial,
-       u32 items_max,
-       struct vxge_hw_mempool_cbs *mp_callback,
-       void *userdata);
-
-struct __vxge_hw_channel*
-__vxge_hw_channel_allocate(struct __vxge_hw_vpath_handle *vph,
-                       enum __vxge_hw_channel_type type, u32 length,
-                       u32 per_dtr_space, void *userdata);
-
-void
-__vxge_hw_channel_free(
-       struct __vxge_hw_channel *channel);
-
-enum vxge_hw_status
-__vxge_hw_channel_initialize(
-       struct __vxge_hw_channel *channel);
-
-enum vxge_hw_status
-__vxge_hw_channel_reset(
-       struct __vxge_hw_channel *channel);
-
 /*
  * __vxge_hw_fifo_txdl_priv - Return the max fragments allocated
  * for the fifo.
@@ -2065,9 +1938,6 @@ enum vxge_hw_status vxge_hw_vpath_open(
        struct vxge_hw_vpath_attr *attr,
        struct __vxge_hw_vpath_handle **vpath_handle);
 
-enum vxge_hw_status
-__vxge_hw_device_vpath_reset_in_prog_check(u64 __iomem *vpath_rst_in_prog);
-
 enum vxge_hw_status vxge_hw_vpath_close(
        struct __vxge_hw_vpath_handle *vpath_handle);
 
@@ -2089,54 +1959,9 @@ enum vxge_hw_status vxge_hw_vpath_mtu_set(
        struct __vxge_hw_vpath_handle *vpath_handle,
        u32 new_mtu);
 
-enum vxge_hw_status vxge_hw_vpath_stats_enable(
-       struct __vxge_hw_vpath_handle *vpath_handle);
-
-enum vxge_hw_status
-__vxge_hw_vpath_stats_access(
-       struct __vxge_hw_virtualpath    *vpath,
-       u32                     operation,
-       u32                     offset,
-       u64                     *stat);
-
-enum vxge_hw_status
-__vxge_hw_vpath_xmac_tx_stats_get(
-       struct __vxge_hw_virtualpath    *vpath,
-       struct vxge_hw_xmac_vpath_tx_stats *vpath_tx_stats);
-
-enum vxge_hw_status
-__vxge_hw_vpath_xmac_rx_stats_get(
-       struct __vxge_hw_virtualpath    *vpath,
-       struct vxge_hw_xmac_vpath_rx_stats *vpath_rx_stats);
-
-enum vxge_hw_status
-__vxge_hw_vpath_stats_get(
-       struct __vxge_hw_virtualpath *vpath,
-       struct vxge_hw_vpath_stats_hw_info *hw_stats);
-
 void
 vxge_hw_vpath_rx_doorbell_init(struct __vxge_hw_vpath_handle *vp);
 
-enum vxge_hw_status
-__vxge_hw_device_vpath_config_check(struct vxge_hw_vp_config *vp_config);
-
-void
-__vxge_hw_device_pci_e_init(struct __vxge_hw_device *hldev);
-
-enum vxge_hw_status
-__vxge_hw_legacy_swapper_set(struct vxge_hw_legacy_reg __iomem *legacy_reg);
-
-enum vxge_hw_status
-__vxge_hw_vpath_swapper_set(struct vxge_hw_vpath_reg __iomem *vpath_reg);
-
-enum vxge_hw_status
-__vxge_hw_kdfc_swapper_set(struct vxge_hw_legacy_reg __iomem *legacy_reg,
-       struct vxge_hw_vpath_reg __iomem *vpath_reg);
-
-enum vxge_hw_status
-__vxge_hw_device_register_poll(
-       void __iomem    *reg,
-       u64 mask, u32 max_millis);
 
 #ifndef readq
 static inline u64 readq(void __iomem *addr)
@@ -2168,62 +1993,12 @@ static inline void __vxge_hw_pio_mem_write32_lower(u32 val, void __iomem *addr)
        writel(val, addr);
 }
 
-static inline enum vxge_hw_status
-__vxge_hw_pio_mem_write64(u64 val64, void __iomem *addr,
-                         u64 mask, u32 max_millis)
-{
-       enum vxge_hw_status status = VXGE_HW_OK;
-
-       __vxge_hw_pio_mem_write32_lower((u32)vxge_bVALn(val64, 32, 32), addr);
-       wmb();
-       __vxge_hw_pio_mem_write32_upper((u32)vxge_bVALn(val64, 0, 32), addr);
-       wmb();
-
-       status = __vxge_hw_device_register_poll(addr, mask, max_millis);
-       return status;
-}
-
-struct vxge_hw_toc_reg __iomem *
-__vxge_hw_device_toc_get(void __iomem *bar0);
-
-enum vxge_hw_status
-__vxge_hw_device_reg_addr_get(struct __vxge_hw_device *hldev);
-
-void
-__vxge_hw_device_id_get(struct __vxge_hw_device *hldev);
-
-void
-__vxge_hw_device_host_info_get(struct __vxge_hw_device *hldev);
-
 enum vxge_hw_status
 vxge_hw_device_flick_link_led(struct __vxge_hw_device *devh, u64 on_off);
 
-enum vxge_hw_status
-__vxge_hw_device_initialize(struct __vxge_hw_device *hldev);
-
-enum vxge_hw_status
-__vxge_hw_vpath_pci_read(
-       struct __vxge_hw_virtualpath    *vpath,
-       u32                     phy_func_0,
-       u32                     offset,
-       u32                     *val);
-
-enum vxge_hw_status
-__vxge_hw_vpath_addr_get(
-       u32 vp_id,
-       struct vxge_hw_vpath_reg __iomem *vpath_reg,
-       u8 (macaddr)[ETH_ALEN],
-       u8 (macaddr_mask)[ETH_ALEN]);
-
-u32
-__vxge_hw_vpath_func_id_get(
-       u32 vp_id, struct vxge_hw_vpmgmt_reg __iomem *vpmgmt_reg);
-
-enum vxge_hw_status
-__vxge_hw_vpath_reset_check(struct __vxge_hw_virtualpath *vpath);
-
 enum vxge_hw_status
 vxge_hw_vpath_strip_fcs_check(struct __vxge_hw_device *hldev, u64 vpath_mask);
+
 /**
  * vxge_debug
  * @level: level of debug verbosity.
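
The header side of the same cleanup drops the prototypes of everything that became static in the .c files, a duplicated declaration of vxge_hw_device_debug_mask_get(), and several static inlines, among them __vxge_hw_pio_mem_write64(). As the removed body shows, that helper emulates a 64-bit PIO store as two ordered 32-bit writes followed by a register poll. A reduced sketch of that ordering, assuming the upper half lives at addr + 4 (as the _upper/_lower helper names suggest) and using a hypothetical poll_register() in place of __vxge_hw_device_register_poll():

    #include <linux/io.h>
    #include <linux/kernel.h>

    int poll_register(void __iomem *addr, u64 mask, u32 max_millis);  /* hypothetical */

    /* sketch only, not the driver code: emulate a 64-bit register write as two
     * ordered 32-bit writes, then poll for completion */
    static int write64_then_poll(u64 val64, void __iomem *addr,
                                 u64 mask, u32 max_millis)
    {
            writel(lower_32_bits(val64), addr);        /* first half               */
            wmb();                                     /* order before the second  */
            writel(upper_32_bits(val64), addr + 4);    /* assumed upper-half slot  */
            wmb();

            return poll_register(addr, mask, max_millis);
    }
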
index 05679e3..b67746e 100644 (file)
@@ -1142,7 +1142,7 @@ static const struct ethtool_ops vxge_ethtool_ops = {
        .get_ethtool_stats      = vxge_get_ethtool_stats,
 };
 
-void initialize_ethtool_ops(struct net_device *ndev)
+void vxge_initialize_ethtool_ops(struct net_device *ndev)
 {
        SET_ETHTOOL_OPS(ndev, &vxge_ethtool_ops);
 }

The rename from initialize_ethtool_ops() to vxge_initialize_ethtool_ops() keeps the one symbol that must stay global behind a driver prefix, avoiding collisions in the kernel-wide namespace; the header and the caller in vxge-main.c are updated to match in the hunks below.
index a69542e..813829f 100644 (file)
@@ -82,6 +82,16 @@ module_param_array(bw_percentage, uint, NULL, 0);
 
 static struct vxge_drv_config *driver_config;
 
+static enum vxge_hw_status vxge_add_mac_addr(struct vxgedev *vdev,
+                                            struct macInfo *mac);
+static enum vxge_hw_status vxge_del_mac_addr(struct vxgedev *vdev,
+                                            struct macInfo *mac);
+static int vxge_mac_list_add(struct vxge_vpath *vpath, struct macInfo *mac);
+static int vxge_mac_list_del(struct vxge_vpath *vpath, struct macInfo *mac);
+static enum vxge_hw_status vxge_restore_vpath_vid_table(struct vxge_vpath *vpath);
+static enum vxge_hw_status vxge_restore_vpath_mac_addr(struct vxge_vpath *vpath);
+static enum vxge_hw_status vxge_reset_all_vpaths(struct vxgedev *vdev);
+
 static inline int is_vxge_card_up(struct vxgedev *vdev)
 {
        return test_bit(__VXGE_STATE_CARD_UP, &vdev->state);
@@ -138,7 +148,7 @@ static inline void VXGE_COMPLETE_ALL_RX(struct vxgedev *vdev)
  * This function is called during interrupt context to notify link up state
  * change.
  */
-void
+static void
 vxge_callback_link_up(struct __vxge_hw_device *hldev)
 {
        struct net_device *dev = hldev->ndev;
@@ -162,7 +172,7 @@ vxge_callback_link_up(struct __vxge_hw_device *hldev)
  * This function is called during interrupt context to notify link down state
  * change.
  */
-void
+static void
 vxge_callback_link_down(struct __vxge_hw_device *hldev)
 {
        struct net_device *dev = hldev->ndev;
@@ -354,7 +364,7 @@ static inline void vxge_post(int *dtr_cnt, void **first_dtr,
  * If the interrupt is because of a received frame or if the receive ring
  * contains fresh as yet un-processed frames, this function is called.
  */
-enum vxge_hw_status
+static enum vxge_hw_status
 vxge_rx_1b_compl(struct __vxge_hw_ring *ringh, void *dtr,
                 u8 t_code, void *userdata)
 {
@@ -531,7 +541,7 @@ vxge_rx_1b_compl(struct __vxge_hw_ring *ringh, void *dtr,
  * freed and frees all skbs whose data have already DMA'ed into the NICs
  * internal memory.
  */
-enum vxge_hw_status
+static enum vxge_hw_status
 vxge_xmit_compl(struct __vxge_hw_fifo *fifo_hw, void *dtr,
                enum vxge_hw_fifo_tcode t_code, void *userdata,
                struct sk_buff ***skb_ptr, int nr_skb, int *more)
@@ -1246,7 +1256,7 @@ static int vxge_set_mac_addr(struct net_device *dev, void *p)
  *
  * Enables the interrupts for the vpath
 */
-void vxge_vpath_intr_enable(struct vxgedev *vdev, int vp_id)
+static void vxge_vpath_intr_enable(struct vxgedev *vdev, int vp_id)
 {
        struct vxge_vpath *vpath = &vdev->vpaths[vp_id];
        int msix_id = 0;
@@ -1279,7 +1289,7 @@ void vxge_vpath_intr_enable(struct vxgedev *vdev, int vp_id)
  *
  * Disables the interrupts for the vpath
 */
-void vxge_vpath_intr_disable(struct vxgedev *vdev, int vp_id)
+static void vxge_vpath_intr_disable(struct vxgedev *vdev, int vp_id)
 {
        struct vxge_vpath *vpath = &vdev->vpaths[vp_id];
        int msix_id;
@@ -1553,7 +1563,7 @@ out:
  *
  * driver may reset the chip on events of serr, eccerr, etc
  */
-int vxge_reset(struct vxgedev *vdev)
+static int vxge_reset(struct vxgedev *vdev)
 {
        return do_vxge_reset(vdev, VXGE_LL_FULL_RESET);
 }
@@ -1724,7 +1734,7 @@ static enum vxge_hw_status vxge_rth_configure(struct vxgedev *vdev)
        return status;
 }
 
-int vxge_mac_list_add(struct vxge_vpath *vpath, struct macInfo *mac)
+static int vxge_mac_list_add(struct vxge_vpath *vpath, struct macInfo *mac)
 {
        struct vxge_mac_addrs *new_mac_entry;
        u8 *mac_address = NULL;
@@ -1757,7 +1767,8 @@ int vxge_mac_list_add(struct vxge_vpath *vpath, struct macInfo *mac)
 }
 
 /* Add a mac address to DA table */
-enum vxge_hw_status vxge_add_mac_addr(struct vxgedev *vdev, struct macInfo *mac)
+static enum vxge_hw_status vxge_add_mac_addr(struct vxgedev *vdev,
+                                            struct macInfo *mac)
 {
        enum vxge_hw_status status = VXGE_HW_OK;
        struct vxge_vpath *vpath;
@@ -1782,7 +1793,7 @@ enum vxge_hw_status vxge_add_mac_addr(struct vxgedev *vdev, struct macInfo *mac)
        return status;
 }
 
-int vxge_mac_list_del(struct vxge_vpath *vpath, struct macInfo *mac)
+static int vxge_mac_list_del(struct vxge_vpath *vpath, struct macInfo *mac)
 {
        struct list_head *entry, *next;
        u64 del_mac = 0;
@@ -1807,7 +1818,8 @@ int vxge_mac_list_del(struct vxge_vpath *vpath, struct macInfo *mac)
        return FALSE;
 }
 /* delete a mac address from DA table */
-enum vxge_hw_status vxge_del_mac_addr(struct vxgedev *vdev, struct macInfo *mac)
+static enum vxge_hw_status vxge_del_mac_addr(struct vxgedev *vdev,
+                                            struct macInfo *mac)
 {
        enum vxge_hw_status status = VXGE_HW_OK;
        struct vxge_vpath *vpath;
@@ -1854,7 +1866,7 @@ static vxge_search_mac_addr_in_da_table(struct vxge_vpath *vpath,
 }
 
 /* Store all vlan ids from the list to the vid table */
-enum vxge_hw_status vxge_restore_vpath_vid_table(struct vxge_vpath *vpath)
+static enum vxge_hw_status vxge_restore_vpath_vid_table(struct vxge_vpath *vpath)
 {
        enum vxge_hw_status status = VXGE_HW_OK;
        struct vxgedev *vdev = vpath->vdev;
@@ -1874,7 +1886,7 @@ enum vxge_hw_status vxge_restore_vpath_vid_table(struct vxge_vpath *vpath)
 }
 
 /* Store all mac addresses from the list to the DA table */
-enum vxge_hw_status vxge_restore_vpath_mac_addr(struct vxge_vpath *vpath)
+static enum vxge_hw_status vxge_restore_vpath_mac_addr(struct vxge_vpath *vpath)
 {
        enum vxge_hw_status status = VXGE_HW_OK;
        struct macInfo mac_info;
@@ -1916,7 +1928,7 @@ enum vxge_hw_status vxge_restore_vpath_mac_addr(struct vxge_vpath *vpath)
 }
 
 /* reset vpaths */
-enum vxge_hw_status vxge_reset_all_vpaths(struct vxgedev *vdev)
+static enum vxge_hw_status vxge_reset_all_vpaths(struct vxgedev *vdev)
 {
        enum vxge_hw_status status = VXGE_HW_OK;
        struct vxge_vpath *vpath;
@@ -1948,7 +1960,7 @@ enum vxge_hw_status vxge_reset_all_vpaths(struct vxgedev *vdev)
 }
 
 /* close vpaths */
-void vxge_close_vpaths(struct vxgedev *vdev, int index)
+static void vxge_close_vpaths(struct vxgedev *vdev, int index)
 {
        struct vxge_vpath *vpath;
        int i;
@@ -1966,7 +1978,7 @@ void vxge_close_vpaths(struct vxgedev *vdev, int index)
 }
 
 /* open vpaths */
-int vxge_open_vpaths(struct vxgedev *vdev)
+static int vxge_open_vpaths(struct vxgedev *vdev)
 {
        struct vxge_hw_vpath_attr attr;
        enum vxge_hw_status status;
@@ -2517,7 +2529,7 @@ static void vxge_poll_vp_lockup(unsigned long data)
  * Return value: '0' on success and an appropriate (-)ve integer as
  * defined in errno.h file on failure.
  */
-int
+static int
 vxge_open(struct net_device *dev)
 {
        enum vxge_hw_status status;
@@ -2721,7 +2733,7 @@ out0:
 }
 
 /* Loop through the mac address list and delete all the entries */
-void vxge_free_mac_add_list(struct vxge_vpath *vpath)
+static void vxge_free_mac_add_list(struct vxge_vpath *vpath)
 {
 
        struct list_head *entry, *next;
@@ -2745,7 +2757,7 @@ static void vxge_napi_del_all(struct vxgedev *vdev)
        }
 }
 
-int do_vxge_close(struct net_device *dev, int do_io)
+static int do_vxge_close(struct net_device *dev, int do_io)
 {
        enum vxge_hw_status status;
        struct vxgedev *vdev;
@@ -2856,7 +2868,7 @@ int do_vxge_close(struct net_device *dev, int do_io)
  * Return value: '0' on success and an appropriate (-)ve integer as
  * defined in errno.h file on failure.
  */
-int
+static int
 vxge_close(struct net_device *dev)
 {
        do_vxge_close(dev, 1);
@@ -3113,10 +3125,10 @@ static const struct net_device_ops vxge_netdev_ops = {
 #endif
 };
 
-int __devinit vxge_device_register(struct __vxge_hw_device *hldev,
-                                  struct vxge_config *config,
-                                  int high_dma, int no_of_vpath,
-                                  struct vxgedev **vdev_out)
+static int __devinit vxge_device_register(struct __vxge_hw_device *hldev,
+                                         struct vxge_config *config,
+                                         int high_dma, int no_of_vpath,
+                                         struct vxgedev **vdev_out)
 {
        struct net_device *ndev;
        enum vxge_hw_status status = VXGE_HW_OK;
@@ -3164,7 +3176,7 @@ int __devinit vxge_device_register(struct __vxge_hw_device *hldev,
 
        ndev->watchdog_timeo = VXGE_LL_WATCH_DOG_TIMEOUT;
 
-       initialize_ethtool_ops(ndev);
+       vxge_initialize_ethtool_ops(ndev);
 
        /* Allocate memory for vpath */
        vdev->vpaths = kzalloc((sizeof(struct vxge_vpath)) *
@@ -3249,7 +3261,7 @@ _out0:
  *
  * This function will unregister and free network device
  */
-void
+static void
 vxge_device_unregister(struct __vxge_hw_device *hldev)
 {
        struct vxgedev *vdev;
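
The vxge-main.c changes follow the same pattern: the forward-declaration block added near the top covers MAC/VLAN helpers that are now static but are called before they are defined, and entry points such as vxge_open(), vxge_close() and the link-state callbacks can drop external linkage because they are only reached through ops tables and callbacks registered within this file. A small illustration of why that is sufficient, with invented names:

    /* sketch: a function referenced only through an ops table needs no
     * external linkage; taking its address in the initializer keeps it used */
    struct dev_ops {
            int (*open)(void);
            int (*close)(void);
    };

    static int my_open(void)  { return 0; }
    static int my_close(void) { return 0; }

    static const struct dev_ops my_ops = {
            .open  = my_open,
            .close = my_close,
    };
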
index d4be07e..de64536 100644 (file)
@@ -396,64 +396,7 @@ struct vxge_tx_priv {
                mod_timer(&timer, (jiffies + exp)); \
        } while (0);
 
-int __devinit vxge_device_register(struct __vxge_hw_device *devh,
-                                   struct vxge_config *config,
-                                   int high_dma, int no_of_vpath,
-                                   struct vxgedev **vdev);
-
-void vxge_device_unregister(struct __vxge_hw_device *devh);
-
-void vxge_vpath_intr_enable(struct vxgedev *vdev, int vp_id);
-
-void vxge_vpath_intr_disable(struct vxgedev *vdev, int vp_id);
-
-void vxge_callback_link_up(struct __vxge_hw_device *devh);
-
-void vxge_callback_link_down(struct __vxge_hw_device *devh);
-
-enum vxge_hw_status vxge_add_mac_addr(struct vxgedev *vdev,
-       struct macInfo *mac);
-
-int vxge_mac_list_del(struct vxge_vpath *vpath, struct macInfo *mac);
-
-int vxge_reset(struct vxgedev *vdev);
-
-enum vxge_hw_status
-vxge_rx_1b_compl(struct __vxge_hw_ring *ringh, void *dtr,
-       u8 t_code, void *userdata);
-
-enum vxge_hw_status
-vxge_xmit_compl(struct __vxge_hw_fifo *fifo_hw, void *dtr,
-       enum vxge_hw_fifo_tcode t_code, void *userdata,
-       struct sk_buff ***skb_ptr, int nr_skbs, int *more);
-
-int vxge_close(struct net_device *dev);
-
-int vxge_open(struct net_device *dev);
-
-void vxge_close_vpaths(struct vxgedev *vdev, int index);
-
-int vxge_open_vpaths(struct vxgedev *vdev);
-
-enum vxge_hw_status vxge_reset_all_vpaths(struct vxgedev *vdev);
-
-enum vxge_hw_status vxge_add_mac_addr(struct vxgedev *vdev,
-       struct macInfo *mac);
-
-enum vxge_hw_status vxge_del_mac_addr(struct vxgedev *vdev,
-       struct macInfo *mac);
-
-int vxge_mac_list_add(struct vxge_vpath *vpath,
-       struct macInfo *mac);
-
-void vxge_free_mac_add_list(struct vxge_vpath *vpath);
-
-enum vxge_hw_status vxge_restore_vpath_mac_addr(struct vxge_vpath *vpath);
-
-enum vxge_hw_status vxge_restore_vpath_vid_table(struct vxge_vpath *vpath);
-
-int do_vxge_close(struct net_device *dev, int do_io);
-extern void initialize_ethtool_ops(struct net_device *ndev);
+extern void vxge_initialize_ethtool_ops(struct net_device *ndev);
 /**
  * #define VXGE_DEBUG_INIT: debug for initialization functions
  * #define VXGE_DEBUG_TX        : debug transmit related functions
index cedf08f..4bdb611 100644 (file)
 #include "vxge-config.h"
 #include "vxge-main.h"
 
+static enum vxge_hw_status
+__vxge_hw_device_handle_error(struct __vxge_hw_device *hldev,
+                             u32 vp_id, enum vxge_hw_event type);
+static enum vxge_hw_status
+__vxge_hw_vpath_alarm_process(struct __vxge_hw_virtualpath *vpath,
+                             u32 skip_alarms);
+
 /*
  * vxge_hw_vpath_intr_enable - Enable vpath interrupts.
  * @vp: Virtual Path handle.
@@ -513,7 +520,7 @@ exit:
  * Link up indication handler. The function is invoked by HW when
  * Titan indicates that the link is up for programmable amount of time.
  */
-enum vxge_hw_status
+static enum vxge_hw_status
 __vxge_hw_device_handle_link_up_ind(struct __vxge_hw_device *hldev)
 {
        /*
@@ -538,7 +545,7 @@ exit:
  * Link down indication handler. The function is invoked by HW when
  * Titan indicates that the link is down.
  */
-enum vxge_hw_status
+static enum vxge_hw_status
 __vxge_hw_device_handle_link_down_ind(struct __vxge_hw_device *hldev)
 {
        /*
@@ -564,7 +571,7 @@ exit:
  *
  * Handle error.
  */
-enum vxge_hw_status
+static enum vxge_hw_status
 __vxge_hw_device_handle_error(
                struct __vxge_hw_device *hldev,
                u32 vp_id,
@@ -646,7 +653,7 @@ void vxge_hw_device_clear_tx_rx(struct __vxge_hw_device *hldev)
  * it swaps the reserve and free arrays.
  *
  */
-enum vxge_hw_status
+static enum vxge_hw_status
 vxge_hw_channel_dtr_alloc(struct __vxge_hw_channel *channel, void **dtrh)
 {
        void **tmp_arr;
@@ -692,7 +699,8 @@ _alloc_after_swap:
  * Posts a dtr to work array.
  *
  */
-void vxge_hw_channel_dtr_post(struct __vxge_hw_channel *channel, void *dtrh)
+static void vxge_hw_channel_dtr_post(struct __vxge_hw_channel *channel,
+                                    void *dtrh)
 {
        vxge_assert(channel->work_arr[channel->post_index] == NULL);
 
@@ -1657,37 +1665,6 @@ exit:
        return status;
 }
 
-/**
- * vxge_hw_vpath_vid_get_next - Get the next vid entry for this vpath
- *               from vlan id table.
- * @vp: Vpath handle.
- * @vid: Buffer to return vlan id
- *
- * Returns the next vlan id in the list for this vpath.
- * see also: vxge_hw_vpath_vid_get
- *
- */
-enum vxge_hw_status
-vxge_hw_vpath_vid_get_next(struct __vxge_hw_vpath_handle *vp, u64 *vid)
-{
-       u64 data;
-       enum vxge_hw_status status = VXGE_HW_OK;
-
-       if (vp == NULL) {
-               status = VXGE_HW_ERR_INVALID_HANDLE;
-               goto exit;
-       }
-
-       status = __vxge_hw_vpath_rts_table_get(vp,
-                       VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_LIST_NEXT_ENTRY,
-                       VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_VID,
-                       0, vid, &data);
-
-       *vid = VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_VLAN_ID(*vid);
-exit:
-       return status;
-}
-
 /**
  * vxge_hw_vpath_vid_delete - Delete the vlan id entry for this vpath
  *               to vlan id table.
@@ -1898,9 +1875,9 @@ exit:
  * Process vpath alarms.
  *
  */
-enum vxge_hw_status __vxge_hw_vpath_alarm_process(
-                       struct __vxge_hw_virtualpath *vpath,
-                       u32 skip_alarms)
+static enum vxge_hw_status
+__vxge_hw_vpath_alarm_process(struct __vxge_hw_virtualpath *vpath,
+                             u32 skip_alarms)
 {
        u64 val64;
        u64 alarm_status;
@@ -2264,36 +2241,6 @@ vxge_hw_vpath_msix_mask(struct __vxge_hw_vpath_handle *vp, int msix_id)
                &hldev->common_reg->set_msix_mask_vect[msix_id % 4]);
 }
 
-/**
- * vxge_hw_vpath_msix_clear - Clear MSIX Vector.
- * @vp: Virtual Path handle.
- * @msix_id:  MSI ID
- *
- * The function clears the msix interrupt for the given msix_id
- *
- * Returns: 0,
- * Otherwise, VXGE_HW_ERR_WRONG_IRQ if the msix index is out of range
- * status.
- * See also:
- */
-void
-vxge_hw_vpath_msix_clear(struct __vxge_hw_vpath_handle *vp, int msix_id)
-{
-       struct __vxge_hw_device *hldev = vp->vpath->hldev;
-       if (hldev->config.intr_mode ==
-                       VXGE_HW_INTR_MODE_MSIX_ONE_SHOT) {
-               __vxge_hw_pio_mem_write32_upper(
-                       (u32)vxge_bVALn(vxge_mBIT(msix_id >> 2), 0, 32),
-                               &hldev->common_reg->
-                                       clr_msix_one_shot_vec[msix_id%4]);
-       } else {
-               __vxge_hw_pio_mem_write32_upper(
-                       (u32)vxge_bVALn(vxge_mBIT(msix_id >> 2), 0, 32),
-                               &hldev->common_reg->
-                                       clear_msix_mask_vect[msix_id%4]);
-       }
-}
-
 /**
  * vxge_hw_vpath_msix_unmask - Unmask the MSIX Vector.
  * @vp: Virtual Path handle.
@@ -2315,22 +2262,6 @@ vxge_hw_vpath_msix_unmask(struct __vxge_hw_vpath_handle *vp, int msix_id)
                        &hldev->common_reg->clear_msix_mask_vect[msix_id%4]);
 }
 
-/**
- * vxge_hw_vpath_msix_mask_all - Mask all MSIX vectors for the vpath.
- * @vp: Virtual Path handle.
- *
- * The function masks all msix interrupt for the given vpath
- *
- */
-void
-vxge_hw_vpath_msix_mask_all(struct __vxge_hw_vpath_handle *vp)
-{
-
-       __vxge_hw_pio_mem_write32_upper(
-               (u32)vxge_bVALn(vxge_mBIT(vp->vpath->vp_id), 0, 32),
-               &vp->vpath->hldev->common_reg->set_msix_mask_all_vect);
-}
-
 /**
  * vxge_hw_vpath_inta_mask_tx_rx - Mask Tx and Rx interrupts.
  * @vp: Virtual Path handle.
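
In vxge-traffic.c the unused exports vxge_hw_vpath_vid_get_next(), vxge_hw_vpath_msix_clear() and vxge_hw_vpath_msix_mask_all() are deleted outright, while the alarm and error handlers pick up static forward declarations like those at the top of the file. The comment kept above vxge_hw_channel_dtr_alloc() describes descriptor recycling by swapping the reserve and free arrays once the reserve side runs dry; a simplified sketch of that idea, with invented field names (the real channel structure is more involved):

    #include <stddef.h>

    /* simplified sketch of reserve/free array recycling; not the driver code */
    struct ring {
            void **reserve;            /* descriptors ready to hand out         */
            void **free;               /* descriptors returned after completion */
            unsigned int reserve_top;
            unsigned int free_top;
    };

    static void *dtr_alloc(struct ring *r)
    {
            if (r->reserve_top == 0) {
                    void **tmp;

                    if (r->free_top == 0)
                            return NULL;          /* nothing to recycle yet  */

                    tmp = r->reserve;             /* swap the arrays so the  */
                    r->reserve = r->free;         /* completed descriptors   */
                    r->free = tmp;                /* become allocatable      */
                    r->reserve_top = r->free_top;
                    r->free_top = 0;
            }

            return r->reserve[--r->reserve_top];
    }
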
index 6fa07d1..9890d4d 100644 (file)
@@ -1748,14 +1748,6 @@ vxge_hw_mrpcim_stats_access(
        u32 offset,
        u64 *stat);
 
-enum vxge_hw_status
-vxge_hw_device_xmac_aggr_stats_get(struct __vxge_hw_device *devh, u32 port,
-                                  struct vxge_hw_xmac_aggr_stats *aggr_stats);
-
-enum vxge_hw_status
-vxge_hw_device_xmac_port_stats_get(struct __vxge_hw_device *devh, u32 port,
-                                  struct vxge_hw_xmac_port_stats *port_stats);
-
 enum vxge_hw_status
 vxge_hw_device_xmac_stats_get(struct __vxge_hw_device *devh,
                              struct vxge_hw_xmac_stats *xmac_stats);
@@ -2117,49 +2109,10 @@ struct __vxge_hw_ring_rxd_priv {
 #endif
 };
 
-/* ========================= RING PRIVATE API ============================= */
-u64
-__vxge_hw_ring_first_block_address_get(
-       struct __vxge_hw_ring *ringh);
-
-enum vxge_hw_status
-__vxge_hw_ring_create(
-       struct __vxge_hw_vpath_handle *vpath_handle,
-       struct vxge_hw_ring_attr *attr);
-
-enum vxge_hw_status
-__vxge_hw_ring_abort(
-       struct __vxge_hw_ring *ringh);
-
-enum vxge_hw_status
-__vxge_hw_ring_reset(
-       struct __vxge_hw_ring *ringh);
-
-enum vxge_hw_status
-__vxge_hw_ring_delete(
-       struct __vxge_hw_vpath_handle *vpath_handle);
-
 /* ========================= FIFO PRIVATE API ============================= */
 
 struct vxge_hw_fifo_attr;
 
-enum vxge_hw_status
-__vxge_hw_fifo_create(
-       struct __vxge_hw_vpath_handle *vpath_handle,
-       struct vxge_hw_fifo_attr *attr);
-
-enum vxge_hw_status
-__vxge_hw_fifo_abort(
-       struct __vxge_hw_fifo *fifoh);
-
-enum vxge_hw_status
-__vxge_hw_fifo_reset(
-       struct __vxge_hw_fifo *ringh);
-
-enum vxge_hw_status
-__vxge_hw_fifo_delete(
-       struct __vxge_hw_vpath_handle *vpath_handle);
-
 struct vxge_hw_mempool_cbs {
        void (*item_func_alloc)(
                        struct vxge_hw_mempool *mempoolh,
@@ -2169,10 +2122,6 @@ struct vxge_hw_mempool_cbs {
                        u32                     is_last);
 };
 
-void
-__vxge_hw_mempool_destroy(
-       struct vxge_hw_mempool *mempool);
-
 #define VXGE_HW_VIRTUAL_PATH_HANDLE(vpath)                             \
                ((struct __vxge_hw_vpath_handle *)(vpath)->vpath_handles.next)
 
@@ -2194,62 +2143,11 @@ __vxge_hw_vpath_rts_table_set(
        u64                     data1,
        u64                     data2);
 
-enum vxge_hw_status
-__vxge_hw_vpath_reset(
-       struct __vxge_hw_device *devh,
-       u32                     vp_id);
-
-enum vxge_hw_status
-__vxge_hw_vpath_sw_reset(
-       struct __vxge_hw_device *devh,
-       u32                     vp_id);
-
 enum vxge_hw_status
 __vxge_hw_vpath_enable(
        struct __vxge_hw_device *devh,
        u32                     vp_id);
 
-void
-__vxge_hw_vpath_prc_configure(
-       struct __vxge_hw_device *devh,
-       u32                     vp_id);
-
-enum vxge_hw_status
-__vxge_hw_vpath_kdfc_configure(
-       struct __vxge_hw_device *devh,
-       u32                     vp_id);
-
-enum vxge_hw_status
-__vxge_hw_vpath_mac_configure(
-       struct __vxge_hw_device *devh,
-       u32                     vp_id);
-
-enum vxge_hw_status
-__vxge_hw_vpath_tim_configure(
-       struct __vxge_hw_device *devh,
-       u32                     vp_id);
-
-enum vxge_hw_status
-__vxge_hw_vpath_initialize(
-       struct __vxge_hw_device *devh,
-       u32                     vp_id);
-
-enum vxge_hw_status
-__vxge_hw_vp_initialize(
-       struct __vxge_hw_device *devh,
-       u32                     vp_id,
-       struct vxge_hw_vp_config        *config);
-
-void
-__vxge_hw_vp_terminate(
-       struct __vxge_hw_device *devh,
-       u32                     vp_id);
-
-enum vxge_hw_status
-__vxge_hw_vpath_alarm_process(
-       struct __vxge_hw_virtualpath    *vpath,
-       u32                     skip_alarms);
-
 void vxge_hw_device_intr_enable(
        struct __vxge_hw_device *devh);
 
@@ -2320,11 +2218,6 @@ vxge_hw_vpath_vid_get(
        struct __vxge_hw_vpath_handle *vpath_handle,
        u64                     *vid);
 
-enum vxge_hw_status
-vxge_hw_vpath_vid_get_next(
-       struct __vxge_hw_vpath_handle *vpath_handle,
-       u64                     *vid);
-
 enum vxge_hw_status
 vxge_hw_vpath_vid_delete(
        struct __vxge_hw_vpath_handle *vpath_handle,
@@ -2386,17 +2279,10 @@ vxge_hw_vpath_msix_mask(struct __vxge_hw_vpath_handle *vpath_handle,
 
 void vxge_hw_device_flush_io(struct __vxge_hw_device *devh);
 
-void
-vxge_hw_vpath_msix_clear(struct __vxge_hw_vpath_handle *vpath_handle,
-                        int msix_id);
-
 void
 vxge_hw_vpath_msix_unmask(struct __vxge_hw_vpath_handle *vpath_handle,
                          int msix_id);
 
-void
-vxge_hw_vpath_msix_mask_all(struct __vxge_hw_vpath_handle *vpath_handle);
-
 enum vxge_hw_status vxge_hw_vpath_intr_enable(
                                struct __vxge_hw_vpath_handle *vpath_handle);
 
@@ -2415,12 +2301,6 @@ vxge_hw_channel_msix_mask(struct __vxge_hw_channel *channelh, int msix_id);
 void
 vxge_hw_channel_msix_unmask(struct __vxge_hw_channel *channelh, int msix_id);
 
-enum vxge_hw_status
-vxge_hw_channel_dtr_alloc(struct __vxge_hw_channel *channel, void **dtrh);
-
-void
-vxge_hw_channel_dtr_post(struct __vxge_hw_channel *channel, void *dtrh);
-
 void
 vxge_hw_channel_dtr_try_complete(struct __vxge_hw_channel *channel,
                                 void **dtrh);
@@ -2436,18 +2316,4 @@ vxge_hw_channel_dtr_count(struct __vxge_hw_channel *channel);
 void
 vxge_hw_vpath_tti_ci_set(struct __vxge_hw_device *hldev, u32 vp_id);
 
-/* ========================== PRIVATE API ================================= */
-
-enum vxge_hw_status
-__vxge_hw_device_handle_link_up_ind(struct __vxge_hw_device *hldev);
-
-enum vxge_hw_status
-__vxge_hw_device_handle_link_down_ind(struct __vxge_hw_device *hldev);
-
-enum vxge_hw_status
-__vxge_hw_device_handle_error(
-               struct __vxge_hw_device *hldev,
-               u32 vp_id,
-               enum vxge_hw_event type);
-
 #endif
index f1ae75d..8251946 100644 (file)
@@ -3580,6 +3580,7 @@ ath5k_pci_probe(struct pci_dev *pdev,
        common->ah = sc->ah;
        common->hw = hw;
        common->cachelsz = csz << 2; /* convert to bytes */
+       spin_lock_init(&common->cc_lock);
 
        /* Initialize device */
        ret = ath5k_hw_attach(sc);
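
The one-line ath5k change (and the matching hunk in ath9k's init path further down) initializes common->cc_lock during probe, presumably because the shared cycle-counter/survey code now takes that lock; a spinlock embedded in a shared structure has to be initialized exactly once before any path can acquire it. A minimal sketch of the pattern, with an invented structure:

    #include <linux/spinlock.h>

    /* sketch: initialize an embedded lock in the owner's probe path, before
     * any other code can try to take it */
    struct shared_counters {
            spinlock_t lock;
            unsigned long cycles;
    };

    static void counters_init(struct shared_counters *c)
    {
            spin_lock_init(&c->lock);     /* must precede the first spin_lock_*() */
            c->cycles = 0;
    }
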
index ec98ab5..a14a5e4 100644 (file)
@@ -34,6 +34,10 @@ static const u32 ar9300_2p2_radio_postamble[][5] = {
 
 static const u32 ar9300Modes_lowest_ob_db_tx_gain_table_2p2[][5] = {
        /* Addr      5G_HT20     5G_HT40     2G_HT40     2G_HT20   */
+       {0x0000a2dc, 0x0380c7fc, 0x0380c7fc, 0x00637800, 0x00637800},
+       {0x0000a2e0, 0x0000f800, 0x0000f800, 0x03838000, 0x03838000},
+       {0x0000a2e4, 0x03ff0000, 0x03ff0000, 0x03fc0000, 0x03fc0000},
+       {0x0000a2e8, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
        {0x0000a410, 0x000050d9, 0x000050d9, 0x000050d9, 0x000050d9},
        {0x0000a500, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
        {0x0000a504, 0x06000003, 0x06000003, 0x04000002, 0x04000002},
@@ -99,6 +103,30 @@ static const u32 ar9300Modes_lowest_ob_db_tx_gain_table_2p2[][5] = {
        {0x0000a5f4, 0x7782b08c, 0x7782b08c, 0x5d801eec, 0x5d801eec},
        {0x0000a5f8, 0x7782b08c, 0x7782b08c, 0x5d801eec, 0x5d801eec},
        {0x0000a5fc, 0x7782b08c, 0x7782b08c, 0x5d801eec, 0x5d801eec},
+       {0x0000a600, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+       {0x0000a604, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+       {0x0000a608, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+       {0x0000a60c, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+       {0x0000a610, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+       {0x0000a614, 0x01404000, 0x01404000, 0x01404000, 0x01404000},
+       {0x0000a618, 0x01404501, 0x01404501, 0x01404501, 0x01404501},
+       {0x0000a61c, 0x02008802, 0x02008802, 0x02008501, 0x02008501},
+       {0x0000a620, 0x0300cc03, 0x0300cc03, 0x0280ca03, 0x0280ca03},
+       {0x0000a624, 0x0300cc03, 0x0300cc03, 0x03010c04, 0x03010c04},
+       {0x0000a628, 0x0300cc03, 0x0300cc03, 0x04014c04, 0x04014c04},
+       {0x0000a62c, 0x03810c03, 0x03810c03, 0x04015005, 0x04015005},
+       {0x0000a630, 0x03810e04, 0x03810e04, 0x04015005, 0x04015005},
+       {0x0000a634, 0x03810e04, 0x03810e04, 0x04015005, 0x04015005},
+       {0x0000a638, 0x03810e04, 0x03810e04, 0x04015005, 0x04015005},
+       {0x0000a63c, 0x03810e04, 0x03810e04, 0x04015005, 0x04015005},
+       {0x0000b2dc, 0x0380c7fc, 0x0380c7fc, 0x00637800, 0x00637800},
+       {0x0000b2e0, 0x0000f800, 0x0000f800, 0x03838000, 0x03838000},
+       {0x0000b2e4, 0x03ff0000, 0x03ff0000, 0x03fc0000, 0x03fc0000},
+       {0x0000b2e8, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+       {0x0000c2dc, 0x0380c7fc, 0x0380c7fc, 0x00637800, 0x00637800},
+       {0x0000c2e0, 0x0000f800, 0x0000f800, 0x03838000, 0x03838000},
+       {0x0000c2e4, 0x03ff0000, 0x03ff0000, 0x03fc0000, 0x03fc0000},
+       {0x0000c2e8, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
        {0x00016044, 0x012492d4, 0x012492d4, 0x012492d4, 0x012492d4},
        {0x00016048, 0x62480001, 0x62480001, 0x62480001, 0x62480001},
        {0x00016068, 0x6db6db6c, 0x6db6db6c, 0x6db6db6c, 0x6db6db6c},
@@ -118,7 +146,7 @@ static const u32 ar9300Modes_fast_clock_2p2[][3] = {
        {0x00008014, 0x044c044c, 0x08980898},
        {0x0000801c, 0x148ec02b, 0x148ec057},
        {0x00008318, 0x000044c0, 0x00008980},
-       {0x00009e00, 0x03721821, 0x03721821},
+       {0x00009e00, 0x0372131c, 0x0372131c},
        {0x0000a230, 0x0000000b, 0x00000016},
        {0x0000a254, 0x00000898, 0x00001130},
 };
@@ -595,15 +623,16 @@ static const u32 ar9300_2p2_baseband_postamble[][5] = {
        {0x0000982c, 0x05eea6d4, 0x05eea6d4, 0x05eea6d4, 0x05eea6d4},
        {0x00009830, 0x0000059c, 0x0000059c, 0x0000119c, 0x0000119c},
        {0x00009c00, 0x000000c4, 0x000000c4, 0x000000c4, 0x000000c4},
-       {0x00009e00, 0x0372161e, 0x0372161e, 0x037216a0, 0x037216a0},
-       {0x00009e04, 0x00802020, 0x00802020, 0x00802020, 0x00802020},
+       {0x00009e00, 0x0372111a, 0x0372111a, 0x037216a0, 0x037216a0},
+       {0x00009e04, 0x001c2020, 0x001c2020, 0x001c2020, 0x001c2020},
        {0x00009e0c, 0x6c4000e2, 0x6d4000e2, 0x6d4000e2, 0x6c4000e2},
        {0x00009e10, 0x7ec88d2e, 0x7ec88d2e, 0x7ec84d2e, 0x7ec84d2e},
-       {0x00009e14, 0x31395d5e, 0x3139605e, 0x3139605e, 0x31395d5e},
+       {0x00009e14, 0x37b95d5e, 0x37b9605e, 0x3379605e, 0x33795d5e},
        {0x00009e18, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
        {0x00009e1c, 0x0001cf9c, 0x0001cf9c, 0x00021f9c, 0x00021f9c},
        {0x00009e20, 0x000003b5, 0x000003b5, 0x000003ce, 0x000003ce},
        {0x00009e2c, 0x0000001c, 0x0000001c, 0x00000021, 0x00000021},
+       {0x00009e3c, 0xcf946220, 0xcf946220, 0xcf946222, 0xcf946222},
        {0x00009e44, 0x02321e27, 0x02321e27, 0x02291e27, 0x02291e27},
        {0x00009e48, 0x5030201a, 0x5030201a, 0x50302012, 0x50302012},
        {0x00009fc8, 0x0003f000, 0x0003f000, 0x0001a000, 0x0001a000},
@@ -624,16 +653,16 @@ static const u32 ar9300_2p2_baseband_postamble[][5] = {
        {0x0000a28c, 0x00022222, 0x00022222, 0x00022222, 0x00022222},
        {0x0000a2c4, 0x00158d18, 0x00158d18, 0x00158d18, 0x00158d18},
        {0x0000a2d0, 0x00071981, 0x00071981, 0x00071981, 0x00071982},
-       {0x0000a2d8, 0xf999a83a, 0xf999a83a, 0xf999a83a, 0xf999a83a},
+       {0x0000a2d8, 0x7999a83a, 0x7999a83a, 0x7999a83a, 0x7999a83a},
        {0x0000a358, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
        {0x0000a830, 0x0000019c, 0x0000019c, 0x0000019c, 0x0000019c},
-       {0x0000ae04, 0x00800000, 0x00800000, 0x00800000, 0x00800000},
+       {0x0000ae04, 0x001c0000, 0x001c0000, 0x001c0000, 0x001c0000},
        {0x0000ae18, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
        {0x0000ae1c, 0x0000019c, 0x0000019c, 0x0000019c, 0x0000019c},
        {0x0000ae20, 0x000001b5, 0x000001b5, 0x000001ce, 0x000001ce},
        {0x0000b284, 0x00000000, 0x00000000, 0x00000150, 0x00000150},
        {0x0000b830, 0x0000019c, 0x0000019c, 0x0000019c, 0x0000019c},
-       {0x0000be04, 0x00800000, 0x00800000, 0x00800000, 0x00800000},
+       {0x0000be04, 0x001c0000, 0x001c0000, 0x001c0000, 0x001c0000},
        {0x0000be18, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
        {0x0000be1c, 0x0000019c, 0x0000019c, 0x0000019c, 0x0000019c},
        {0x0000be20, 0x000001b5, 0x000001b5, 0x000001ce, 0x000001ce},
@@ -649,13 +678,13 @@ static const u32 ar9300_2p2_baseband_core[][2] = {
        {0x00009814, 0x9280c00a},
        {0x00009818, 0x00000000},
        {0x0000981c, 0x00020028},
-       {0x00009834, 0x5f3ca3de},
+       {0x00009834, 0x6400a290},
        {0x00009838, 0x0108ecff},
        {0x0000983c, 0x14750600},
        {0x00009880, 0x201fff00},
        {0x00009884, 0x00001042},
        {0x000098a4, 0x00200400},
-       {0x000098b0, 0x52440bbe},
+       {0x000098b0, 0x32840bbe},
        {0x000098d0, 0x004b6a8e},
        {0x000098d4, 0x00000820},
        {0x000098dc, 0x00000000},
@@ -681,7 +710,6 @@ static const u32 ar9300_2p2_baseband_core[][2] = {
        {0x00009e30, 0x06336f77},
        {0x00009e34, 0x6af6532f},
        {0x00009e38, 0x0cc80c00},
-       {0x00009e3c, 0xcf946222},
        {0x00009e40, 0x0d261820},
        {0x00009e4c, 0x00001004},
        {0x00009e50, 0x00ff03f1},
@@ -694,7 +722,7 @@ static const u32 ar9300_2p2_baseband_core[][2] = {
        {0x0000a220, 0x00000000},
        {0x0000a224, 0x00000000},
        {0x0000a228, 0x10002310},
-       {0x0000a22c, 0x01036a1e},
+       {0x0000a22c, 0x01036a27},
        {0x0000a23c, 0x00000000},
        {0x0000a244, 0x0c000000},
        {0x0000a2a0, 0x00000001},
@@ -702,10 +730,6 @@ static const u32 ar9300_2p2_baseband_core[][2] = {
        {0x0000a2c8, 0x00000000},
        {0x0000a2cc, 0x18c43433},
        {0x0000a2d4, 0x00000000},
-       {0x0000a2dc, 0x00000000},
-       {0x0000a2e0, 0x00000000},
-       {0x0000a2e4, 0x00000000},
-       {0x0000a2e8, 0x00000000},
        {0x0000a2ec, 0x00000000},
        {0x0000a2f0, 0x00000000},
        {0x0000a2f4, 0x00000000},
@@ -753,33 +777,17 @@ static const u32 ar9300_2p2_baseband_core[][2] = {
        {0x0000a430, 0x1ce739ce},
        {0x0000a434, 0x00000000},
        {0x0000a438, 0x00001801},
-       {0x0000a43c, 0x00000000},
+       {0x0000a43c, 0x00100000},
        {0x0000a440, 0x00000000},
        {0x0000a444, 0x00000000},
        {0x0000a448, 0x06000080},
        {0x0000a44c, 0x00000001},
        {0x0000a450, 0x00010000},
        {0x0000a458, 0x00000000},
-       {0x0000a600, 0x00000000},
-       {0x0000a604, 0x00000000},
-       {0x0000a608, 0x00000000},
-       {0x0000a60c, 0x00000000},
-       {0x0000a610, 0x00000000},
-       {0x0000a614, 0x00000000},
-       {0x0000a618, 0x00000000},
-       {0x0000a61c, 0x00000000},
-       {0x0000a620, 0x00000000},
-       {0x0000a624, 0x00000000},
-       {0x0000a628, 0x00000000},
-       {0x0000a62c, 0x00000000},
-       {0x0000a630, 0x00000000},
-       {0x0000a634, 0x00000000},
-       {0x0000a638, 0x00000000},
-       {0x0000a63c, 0x00000000},
        {0x0000a640, 0x00000000},
        {0x0000a644, 0x3fad9d74},
        {0x0000a648, 0x0048060a},
-       {0x0000a64c, 0x00000637},
+       {0x0000a64c, 0x00003c37},
        {0x0000a670, 0x03020100},
        {0x0000a674, 0x09080504},
        {0x0000a678, 0x0d0c0b0a},
@@ -802,10 +810,6 @@ static const u32 ar9300_2p2_baseband_core[][2] = {
        {0x0000a8f4, 0x00000000},
        {0x0000b2d0, 0x00000080},
        {0x0000b2d4, 0x00000000},
-       {0x0000b2dc, 0x00000000},
-       {0x0000b2e0, 0x00000000},
-       {0x0000b2e4, 0x00000000},
-       {0x0000b2e8, 0x00000000},
        {0x0000b2ec, 0x00000000},
        {0x0000b2f0, 0x00000000},
        {0x0000b2f4, 0x00000000},
@@ -820,10 +824,6 @@ static const u32 ar9300_2p2_baseband_core[][2] = {
        {0x0000b8f4, 0x00000000},
        {0x0000c2d0, 0x00000080},
        {0x0000c2d4, 0x00000000},
-       {0x0000c2dc, 0x00000000},
-       {0x0000c2e0, 0x00000000},
-       {0x0000c2e4, 0x00000000},
-       {0x0000c2e8, 0x00000000},
        {0x0000c2ec, 0x00000000},
        {0x0000c2f0, 0x00000000},
        {0x0000c2f4, 0x00000000},
@@ -835,6 +835,10 @@ static const u32 ar9300_2p2_baseband_core[][2] = {
 
 static const u32 ar9300Modes_high_power_tx_gain_table_2p2[][5] = {
        /* Addr      5G_HT20     5G_HT40     2G_HT40     2G_HT20   */
+       {0x0000a2dc, 0x0380c7fc, 0x0380c7fc, 0x00637800, 0x00637800},
+       {0x0000a2e0, 0x0000f800, 0x0000f800, 0x03838000, 0x03838000},
+       {0x0000a2e4, 0x03ff0000, 0x03ff0000, 0x03fc0000, 0x03fc0000},
+       {0x0000a2e8, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
        {0x0000a410, 0x000050d8, 0x000050d8, 0x000050d9, 0x000050d9},
        {0x0000a500, 0x00002220, 0x00002220, 0x00000000, 0x00000000},
        {0x0000a504, 0x04002222, 0x04002222, 0x04000002, 0x04000002},
@@ -855,7 +859,7 @@ static const u32 ar9300Modes_high_power_tx_gain_table_2p2[][5] = {
        {0x0000a540, 0x49005e72, 0x49005e72, 0x38001660, 0x38001660},
        {0x0000a544, 0x4e005eb2, 0x4e005eb2, 0x3b001861, 0x3b001861},
        {0x0000a548, 0x53005f12, 0x53005f12, 0x3e001a81, 0x3e001a81},
-       {0x0000a54c, 0x59025eb5, 0x59025eb5, 0x42001a83, 0x42001a83},
+       {0x0000a54c, 0x59025eb2, 0x59025eb2, 0x42001a83, 0x42001a83},
        {0x0000a550, 0x5e025f12, 0x5e025f12, 0x44001c84, 0x44001c84},
        {0x0000a554, 0x61027f12, 0x61027f12, 0x48001ce3, 0x48001ce3},
        {0x0000a558, 0x6702bf12, 0x6702bf12, 0x4c001ce5, 0x4c001ce5},
@@ -900,6 +904,30 @@ static const u32 ar9300Modes_high_power_tx_gain_table_2p2[][5] = {
        {0x0000a5f4, 0x6f82bf16, 0x6f82bf16, 0x56801eec, 0x56801eec},
        {0x0000a5f8, 0x6f82bf16, 0x6f82bf16, 0x56801eec, 0x56801eec},
        {0x0000a5fc, 0x6f82bf16, 0x6f82bf16, 0x56801eec, 0x56801eec},
+       {0x0000a600, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+       {0x0000a604, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+       {0x0000a608, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+       {0x0000a60c, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+       {0x0000a610, 0x00804000, 0x00804000, 0x00000000, 0x00000000},
+       {0x0000a614, 0x00804201, 0x00804201, 0x01404000, 0x01404000},
+       {0x0000a618, 0x0280c802, 0x0280c802, 0x01404501, 0x01404501},
+       {0x0000a61c, 0x0280ca03, 0x0280ca03, 0x02008501, 0x02008501},
+       {0x0000a620, 0x04c15104, 0x04c15104, 0x0280ca03, 0x0280ca03},
+       {0x0000a624, 0x04c15305, 0x04c15305, 0x03010c04, 0x03010c04},
+       {0x0000a628, 0x04c15305, 0x04c15305, 0x04014c04, 0x04014c04},
+       {0x0000a62c, 0x04c15305, 0x04c15305, 0x04015005, 0x04015005},
+       {0x0000a630, 0x04c15305, 0x04c15305, 0x04015005, 0x04015005},
+       {0x0000a634, 0x04c15305, 0x04c15305, 0x04015005, 0x04015005},
+       {0x0000a638, 0x04c15305, 0x04c15305, 0x04015005, 0x04015005},
+       {0x0000a63c, 0x04c15305, 0x04c15305, 0x04015005, 0x04015005},
+       {0x0000b2dc, 0x0380c7fc, 0x0380c7fc, 0x00637800, 0x00637800},
+       {0x0000b2e0, 0x0000f800, 0x0000f800, 0x03838000, 0x03838000},
+       {0x0000b2e4, 0x03ff0000, 0x03ff0000, 0x03fc0000, 0x03fc0000},
+       {0x0000b2e8, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+       {0x0000c2dc, 0x0380c7fc, 0x0380c7fc, 0x00637800, 0x00637800},
+       {0x0000c2e0, 0x0000f800, 0x0000f800, 0x03838000, 0x03838000},
+       {0x0000c2e4, 0x03ff0000, 0x03ff0000, 0x03fc0000, 0x03fc0000},
+       {0x0000c2e8, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
        {0x00016044, 0x056db2e6, 0x056db2e6, 0x056db2e6, 0x056db2e6},
        {0x00016048, 0xae480001, 0xae480001, 0xae480001, 0xae480001},
        {0x00016068, 0x6eb6db6c, 0x6eb6db6c, 0x6eb6db6c, 0x6eb6db6c},
@@ -913,6 +941,10 @@ static const u32 ar9300Modes_high_power_tx_gain_table_2p2[][5] = {
 
 static const u32 ar9300Modes_high_ob_db_tx_gain_table_2p2[][5] = {
        /* Addr      5G_HT20     5G_HT40     2G_HT40     2G_HT20   */
+       {0x0000a2dc, 0x01feee00, 0x01feee00, 0x00637800, 0x00637800},
+       {0x0000a2e0, 0x0000f000, 0x0000f000, 0x03838000, 0x03838000},
+       {0x0000a2e4, 0x01ff0000, 0x01ff0000, 0x03fc0000, 0x03fc0000},
+       {0x0000a2e8, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
        {0x0000a410, 0x000050d8, 0x000050d8, 0x000050d9, 0x000050d9},
        {0x0000a500, 0x00002220, 0x00002220, 0x00000000, 0x00000000},
        {0x0000a504, 0x04002222, 0x04002222, 0x04000002, 0x04000002},
@@ -933,7 +965,7 @@ static const u32 ar9300Modes_high_ob_db_tx_gain_table_2p2[][5] = {
        {0x0000a540, 0x49005e72, 0x49005e72, 0x38001660, 0x38001660},
        {0x0000a544, 0x4e005eb2, 0x4e005eb2, 0x3b001861, 0x3b001861},
        {0x0000a548, 0x53005f12, 0x53005f12, 0x3e001a81, 0x3e001a81},
-       {0x0000a54c, 0x59025eb5, 0x59025eb5, 0x42001a83, 0x42001a83},
+       {0x0000a54c, 0x59025eb2, 0x59025eb2, 0x42001a83, 0x42001a83},
        {0x0000a550, 0x5e025f12, 0x5e025f12, 0x44001c84, 0x44001c84},
        {0x0000a554, 0x61027f12, 0x61027f12, 0x48001ce3, 0x48001ce3},
        {0x0000a558, 0x6702bf12, 0x6702bf12, 0x4c001ce5, 0x4c001ce5},
@@ -978,6 +1010,30 @@ static const u32 ar9300Modes_high_ob_db_tx_gain_table_2p2[][5] = {
        {0x0000a5f4, 0x6f82bf16, 0x6f82bf16, 0x56801eec, 0x56801eec},
        {0x0000a5f8, 0x6f82bf16, 0x6f82bf16, 0x56801eec, 0x56801eec},
        {0x0000a5fc, 0x6f82bf16, 0x6f82bf16, 0x56801eec, 0x56801eec},
+       {0x0000a600, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+       {0x0000a604, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+       {0x0000a608, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+       {0x0000a60c, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+       {0x0000a610, 0x00804000, 0x00804000, 0x00000000, 0x00000000},
+       {0x0000a614, 0x00804201, 0x00804201, 0x01404000, 0x01404000},
+       {0x0000a618, 0x0280c802, 0x0280c802, 0x01404501, 0x01404501},
+       {0x0000a61c, 0x0280ca03, 0x0280ca03, 0x02008501, 0x02008501},
+       {0x0000a620, 0x04c15104, 0x04c15104, 0x0280ca03, 0x0280ca03},
+       {0x0000a624, 0x04c15305, 0x04c15305, 0x03010c04, 0x03010c04},
+       {0x0000a628, 0x04c15305, 0x04c15305, 0x04014c04, 0x04014c04},
+       {0x0000a62c, 0x04c15305, 0x04c15305, 0x04015005, 0x04015005},
+       {0x0000a630, 0x04c15305, 0x04c15305, 0x04015005, 0x04015005},
+       {0x0000a634, 0x04c15305, 0x04c15305, 0x04015005, 0x04015005},
+       {0x0000a638, 0x04c15305, 0x04c15305, 0x04015005, 0x04015005},
+       {0x0000a63c, 0x04c15305, 0x04c15305, 0x04015005, 0x04015005},
+       {0x0000b2dc, 0x01feee00, 0x01feee00, 0x00637800, 0x00637800},
+       {0x0000b2e0, 0x0000f000, 0x0000f000, 0x03838000, 0x03838000},
+       {0x0000b2e4, 0x01ff0000, 0x01ff0000, 0x03fc0000, 0x03fc0000},
+       {0x0000b2e8, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+       {0x0000c2dc, 0x01feee00, 0x01feee00, 0x00637800, 0x00637800},
+       {0x0000c2e0, 0x0000f000, 0x0000f000, 0x03838000, 0x03838000},
+       {0x0000c2e4, 0x01ff0000, 0x01ff0000, 0x03fc0000, 0x03fc0000},
+       {0x0000c2e8, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
        {0x00016044, 0x056db2e4, 0x056db2e4, 0x056db2e4, 0x056db2e4},
        {0x00016048, 0x8e480001, 0x8e480001, 0x8e480001, 0x8e480001},
        {0x00016068, 0x6db6db6c, 0x6db6db6c, 0x6db6db6c, 0x6db6db6c},
@@ -1151,14 +1207,14 @@ static const u32 ar9300Common_rx_gain_table_2p2[][2] = {
        {0x0000b074, 0x00000000},
        {0x0000b078, 0x00000000},
        {0x0000b07c, 0x00000000},
-       {0x0000b080, 0x32323232},
-       {0x0000b084, 0x2f2f3232},
-       {0x0000b088, 0x23282a2d},
-       {0x0000b08c, 0x1c1e2123},
-       {0x0000b090, 0x14171919},
-       {0x0000b094, 0x0e0e1214},
-       {0x0000b098, 0x03050707},
-       {0x0000b09c, 0x00030303},
+       {0x0000b080, 0x2a2d2f32},
+       {0x0000b084, 0x21232328},
+       {0x0000b088, 0x19191c1e},
+       {0x0000b08c, 0x12141417},
+       {0x0000b090, 0x07070e0e},
+       {0x0000b094, 0x03030305},
+       {0x0000b098, 0x00000003},
+       {0x0000b09c, 0x00000000},
        {0x0000b0a0, 0x00000000},
        {0x0000b0a4, 0x00000000},
        {0x0000b0a8, 0x00000000},
@@ -1251,6 +1307,10 @@ static const u32 ar9300Common_rx_gain_table_2p2[][2] = {
 
 static const u32 ar9300Modes_low_ob_db_tx_gain_table_2p2[][5] = {
        /* Addr      5G_HT20     5G_HT40     2G_HT40     2G_HT20   */
+       {0x0000a2dc, 0x0380c7fc, 0x0380c7fc, 0x00637800, 0x00637800},
+       {0x0000a2e0, 0x0000f800, 0x0000f800, 0x03838000, 0x03838000},
+       {0x0000a2e4, 0x03ff0000, 0x03ff0000, 0x03fc0000, 0x03fc0000},
+       {0x0000a2e8, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
        {0x0000a410, 0x000050d9, 0x000050d9, 0x000050d9, 0x000050d9},
        {0x0000a500, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
        {0x0000a504, 0x06000003, 0x06000003, 0x04000002, 0x04000002},
@@ -1316,6 +1376,30 @@ static const u32 ar9300Modes_low_ob_db_tx_gain_table_2p2[][5] = {
        {0x0000a5f4, 0x7782b08c, 0x7782b08c, 0x5d801eec, 0x5d801eec},
        {0x0000a5f8, 0x7782b08c, 0x7782b08c, 0x5d801eec, 0x5d801eec},
        {0x0000a5fc, 0x7782b08c, 0x7782b08c, 0x5d801eec, 0x5d801eec},
+       {0x0000a600, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+       {0x0000a604, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+       {0x0000a608, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+       {0x0000a60c, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+       {0x0000a610, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+       {0x0000a614, 0x01404000, 0x01404000, 0x01404000, 0x01404000},
+       {0x0000a618, 0x01404501, 0x01404501, 0x01404501, 0x01404501},
+       {0x0000a61c, 0x02008802, 0x02008802, 0x02008501, 0x02008501},
+       {0x0000a620, 0x0300cc03, 0x0300cc03, 0x0280ca03, 0x0280ca03},
+       {0x0000a624, 0x0300cc03, 0x0300cc03, 0x03010c04, 0x03010c04},
+       {0x0000a628, 0x0300cc03, 0x0300cc03, 0x04014c04, 0x04014c04},
+       {0x0000a62c, 0x03810c03, 0x03810c03, 0x04015005, 0x04015005},
+       {0x0000a630, 0x03810e04, 0x03810e04, 0x04015005, 0x04015005},
+       {0x0000a634, 0x03810e04, 0x03810e04, 0x04015005, 0x04015005},
+       {0x0000a638, 0x03810e04, 0x03810e04, 0x04015005, 0x04015005},
+       {0x0000a63c, 0x03810e04, 0x03810e04, 0x04015005, 0x04015005},
+       {0x0000b2dc, 0x0380c7fc, 0x0380c7fc, 0x00637800, 0x00637800},
+       {0x0000b2e0, 0x0000f800, 0x0000f800, 0x03838000, 0x03838000},
+       {0x0000b2e4, 0x03ff0000, 0x03ff0000, 0x03fc0000, 0x03fc0000},
+       {0x0000b2e8, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+       {0x0000c2dc, 0x0380c7fc, 0x0380c7fc, 0x00637800, 0x00637800},
+       {0x0000c2e0, 0x0000f800, 0x0000f800, 0x03838000, 0x03838000},
+       {0x0000c2e4, 0x03ff0000, 0x03ff0000, 0x03fc0000, 0x03fc0000},
+       {0x0000c2e8, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
        {0x00016044, 0x012492d4, 0x012492d4, 0x012492d4, 0x012492d4},
        {0x00016048, 0x66480001, 0x66480001, 0x66480001, 0x66480001},
        {0x00016068, 0x6db6db6c, 0x6db6db6c, 0x6db6db6c, 0x6db6db6c},
@@ -1414,15 +1498,10 @@ static const u32 ar9300_2p2_mac_core[][2] = {
        {0x00008144, 0xffffffff},
        {0x00008168, 0x00000000},
        {0x0000816c, 0x00000000},
-       {0x00008170, 0x18486200},
-       {0x00008174, 0x33332210},
-       {0x00008178, 0x00000000},
-       {0x0000817c, 0x00020000},
        {0x000081c0, 0x00000000},
        {0x000081c4, 0x33332210},
        {0x000081c8, 0x00000000},
        {0x000081cc, 0x00000000},
-       {0x000081d4, 0x00000000},
        {0x000081ec, 0x00000000},
        {0x000081f0, 0x00000000},
        {0x000081f4, 0x00000000},
index 7c38229..716db41 100644 (file)
@@ -347,6 +347,10 @@ static bool create_pa_curve(u32 *data_L, u32 *data_U, u32 *pa_table, u16 *gain)
            (((Y[6] - Y[3]) * 1 << scale_factor) +
             (x_est[6] - x_est[3])) / (x_est[6] - x_est[3]);
 
+       /* prevent division by zero */
+       if (G_fxp == 0)
+               return false;
+
        Y_intercept =
            (G_fxp * (x_est[0] - x_est[3]) +
             (1 << scale_factor)) / (1 << scale_factor) + Y[3];
@@ -356,14 +360,12 @@ static bool create_pa_curve(u32 *data_L, u32 *data_U, u32 *pa_table, u16 *gain)
 
        for (i = 0; i <= 3; i++) {
                y_est[i] = i * 32;
-
-               /* prevent division by zero */
-               if (G_fxp == 0)
-                       return false;
-
                x_est[i] = ((y_est[i] * 1 << scale_factor) + G_fxp) / G_fxp;
        }
 
+       if (y_est[max_index] == 0)
+               return false;
+
        x_est_fxp1_nonlin =
            x_est[max_index] - ((1 << scale_factor) * y_est[max_index] +
                                G_fxp) / G_fxp;
@@ -457,6 +459,8 @@ static bool create_pa_curve(u32 *data_L, u32 *data_U, u32 *pa_table, u16 *gain)
 
        Q_scale_B = find_proper_scale(find_expn(abs(scale_B)), 10);
        scale_B = scale_B / (1 << Q_scale_B);
+       if (scale_B == 0)
+               return false;
        Q_beta = find_proper_scale(find_expn(abs(beta_raw)), 10);
        Q_alpha = find_proper_scale(find_expn(abs(alpha_raw)), 10);
        beta_raw = beta_raw / (1 << Q_beta);
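
The hunks above move the G_fxp == 0 guard ahead of the first division and add checks for y_est[max_index] and scale_B, so create_pa_curve() rejects the curve instead of dividing by zero. A minimal standalone sketch of that guard-before-divide pattern, using placeholder names (gain_fxp, scale_factor) rather than the driver's symbols:

/* Illustrative only: check the divisor before dividing and reject the
 * whole computation on zero, as the patch now does for G_fxp, y_est[]
 * and scale_B. Names are placeholders, not ath9k symbols. */
#include <stdbool.h>
#include <stdio.h>

static bool scale_by_gain(int y, int gain_fxp, int scale_factor, int *x_out)
{
	if (gain_fxp == 0)		/* bail out before any division */
		return false;

	*x_out = (y * (1 << scale_factor) + gain_fxp) / gain_fxp;
	return true;
}

int main(void)
{
	int x;

	if (!scale_by_gain(32, 0, 5, &x))
		printf("curve rejected, no division by zero attempted\n");
	return 0;
}
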
index 4ed010d..19891e7 100644 (file)
@@ -370,7 +370,7 @@ void ath_beacon_tasklet(unsigned long data)
                        ath_print(common, ATH_DBG_BSTUCK,
                                  "beacon is officially stuck\n");
                        sc->sc_flags |= SC_OP_TSF_RESET;
-                       ath_reset(sc, false);
+                       ath_reset(sc, true);
                }
 
                return;
index bc6c4df..95b41db 100644 (file)
@@ -577,6 +577,7 @@ static int ath9k_init_softc(u16 devid, struct ath_softc *sc, u16 subsysid,
        common->hw = sc->hw;
        common->priv = sc;
        common->debug_mask = ath9k_debug;
+       spin_lock_init(&common->cc_lock);
 
        spin_lock_init(&sc->wiphy_lock);
        spin_lock_init(&sc->sc_resetlock);
index 3ff0e47..c6ec800 100644 (file)
@@ -182,6 +182,9 @@ static void ath_update_survey_stats(struct ath_softc *sc)
        struct ath_cycle_counters *cc = &common->cc_survey;
        unsigned int div = common->clockrate * 1000;
 
+       if (!ah->curchan)
+               return;
+
        if (ah->power_mode == ATH9K_PM_AWAKE)
                ath_hw_cycle_counters_update(common);
 
@@ -577,7 +580,7 @@ void ath_hw_check(struct work_struct *work)
 
                msleep(1);
        }
-       ath_reset(sc, false);
+       ath_reset(sc, true);
 
 out:
        ath9k_ps_restore(sc);
@@ -595,7 +598,7 @@ void ath9k_tasklet(unsigned long data)
        ath9k_ps_wakeup(sc);
 
        if (status & ATH9K_INT_FATAL) {
-               ath_reset(sc, false);
+               ath_reset(sc, true);
                ath9k_ps_restore(sc);
                return;
        }
index d077186..30ef2df 100644 (file)
@@ -673,6 +673,7 @@ static enum ATH_AGGR_STATUS ath_tx_form_aggr(struct ath_softc *sc,
        u16 aggr_limit = 0, al = 0, bpad = 0,
                al_delta, h_baw = tid->baw_size / 2;
        enum ATH_AGGR_STATUS status = ATH_AGGR_DONE;
+       struct ieee80211_tx_info *tx_info;
 
        bf_first = list_first_entry(&tid->buf_q, struct ath_buf, list);
 
@@ -699,6 +700,11 @@ static enum ATH_AGGR_STATUS ath_tx_form_aggr(struct ath_softc *sc,
                        break;
                }
 
+               tx_info = IEEE80211_SKB_CB(bf->bf_mpdu);
+               if (nframes && ((tx_info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE) ||
+                       !(tx_info->control.rates[0].flags & IEEE80211_TX_RC_MCS)))
+                       break;
+
                /* do not exceed subframe limit */
                if (nframes >= min((int)h_baw, ATH_AMPDU_SUBFRAME_DEFAULT)) {
                        status = ATH_AGGR_LIMITED;
@@ -2157,7 +2163,7 @@ static void ath_tx_complete_poll_work(struct work_struct *work)
                ath_print(ath9k_hw_common(sc->sc_ah), ATH_DBG_RESET,
                          "tx hung, resetting the chip\n");
                ath9k_ps_wakeup(sc);
-               ath_reset(sc, false);
+               ath_reset(sc, true);
                ath9k_ps_restore(sc);
        }
 
index f78728c..568174c 100644 (file)
@@ -116,8 +116,9 @@ __regwrite_out :                                                    \
 } while (0);
 
 
-#define carl9170_async_get_buf()                                       \
+#define carl9170_async_regwrite_get_buf()                              \
 do {                                                                   \
+       __nreg = 0;                                                     \
        __cmd = carl9170_cmd_buf(__carl, CARL9170_CMD_WREG_ASYNC,       \
                                 CARL9170_MAX_CMD_PAYLOAD_LEN);         \
        if (__cmd == NULL) {                                            \
@@ -128,38 +129,42 @@ do {                                                                      \
 
 #define carl9170_async_regwrite_begin(carl)                            \
 do {                                                                   \
-       int __nreg = 0, __err = 0;                                      \
        struct ar9170 *__carl = carl;                                   \
        struct carl9170_cmd *__cmd;                                     \
-       carl9170_async_get_buf();                                       \
+       unsigned int __nreg;                                            \
+       int  __err = 0;                                                 \
+       carl9170_async_regwrite_get_buf();                              \
+
+#define carl9170_async_regwrite_flush()                                        \
+do {                                                                   \
+       if (__cmd == NULL || __nreg == 0)                               \
+               break;                                                  \
+                                                                       \
+       if (IS_ACCEPTING_CMD(__carl) && __nreg) {                       \
+               __cmd->hdr.len = 8 * __nreg;                            \
+               __err = __carl9170_exec_cmd(__carl, __cmd, true);       \
+               __cmd = NULL;                                           \
+               break;                                                  \
+       }                                                               \
+       goto __async_regwrite_out;                                      \
+} while (0)
 
 #define carl9170_async_regwrite(r, v) do {                             \
+       if (__cmd == NULL)                                              \
+               carl9170_async_regwrite_get_buf();                      \
        __cmd->wreg.regs[__nreg].addr = cpu_to_le32(r);                 \
        __cmd->wreg.regs[__nreg].val = cpu_to_le32(v);                  \
        __nreg++;                                                       \
-       if ((__nreg >= PAYLOAD_MAX/2)) {                                \
-               if (IS_ACCEPTING_CMD(__carl)) {                         \
-                       __cmd->hdr.len = 8 * __nreg;                    \
-                       __err = __carl9170_exec_cmd(__carl, __cmd, true);\
-                       __cmd = NULL;                                   \
-                       carl9170_async_get_buf();                       \
-               } else {                                                \
-                       goto __async_regwrite_out;                      \
-               }                                                       \
-               __nreg = 0;                                             \
-               if (__err)                                              \
-                       goto __async_regwrite_out;                      \
-       }                                                               \
+       if ((__nreg >= PAYLOAD_MAX / 2))                                \
+               carl9170_async_regwrite_flush();                        \
 } while (0)
 
-#define carl9170_async_regwrite_finish()                               \
+#define carl9170_async_regwrite_finish() do {                          \
 __async_regwrite_out :                                                 \
-       if (__err == 0 && __nreg) {                                     \
-               __cmd->hdr.len = 8 * __nreg;                            \
-               if (IS_ACCEPTING_CMD(__carl))                           \
-                       __err = __carl9170_exec_cmd(__carl, __cmd, true);\
-               __nreg = 0;                                             \
-       }
+       if (__cmd != NULL && __err == 0)                                \
+               carl9170_async_regwrite_flush();                        \
+       kfree(__cmd);                                                   \
+} while (0)                                                            \
 
 #define carl9170_async_regwrite_result()                               \
        __err;                                                          \
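
The reworked macros allocate the command buffer lazily (carl9170_async_regwrite_get_buf() whenever __cmd is NULL), push full buffers out through carl9170_async_regwrite_flush(), and have the finish step free any remaining buffer. The call sequence callers use is unchanged; a rough sketch (the two register offsets below are placeholders, not real AR9170 addresses):

/* Sketch of the expected begin/write/finish/result sequence; the two
 * register offsets are made up for illustration. */
static int example_async_upload(struct ar9170 *ar)
{
	int err;

	carl9170_async_regwrite_begin(ar);
	carl9170_async_regwrite(0x1c3000, 0x00000001);
	carl9170_async_regwrite(0x1c3004, 0x00000002);
	carl9170_async_regwrite_finish();
	err = carl9170_async_regwrite_result();

	return err;
}
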
index 3cc99f3..980ae70 100644 (file)
@@ -639,8 +639,8 @@ init:
                if (err)
                        goto unlock;
        } else {
-               err = carl9170_mod_virtual_mac(ar, vif_id, vif->addr);
                rcu_read_unlock();
+               err = carl9170_mod_virtual_mac(ar, vif_id, vif->addr);
 
                if (err)
                        goto unlock;
index c7f6193..d8607f4 100644 (file)
@@ -591,16 +591,23 @@ int __carl9170_exec_cmd(struct ar9170 *ar, struct carl9170_cmd *cmd,
                        const bool free_buf)
 {
        struct urb *urb;
+       int err = 0;
 
-       if (!IS_INITIALIZED(ar))
-               return -EPERM;
+       if (!IS_INITIALIZED(ar)) {
+               err = -EPERM;
+               goto err_free;
+       }
 
-       if (WARN_ON(cmd->hdr.len > CARL9170_MAX_CMD_LEN - 4))
-               return -EINVAL;
+       if (WARN_ON(cmd->hdr.len > CARL9170_MAX_CMD_LEN - 4)) {
+               err = -EINVAL;
+               goto err_free;
+       }
 
        urb = usb_alloc_urb(0, GFP_ATOMIC);
-       if (!urb)
-               return -ENOMEM;
+       if (!urb) {
+               err = -ENOMEM;
+               goto err_free;
+       }
 
        usb_fill_int_urb(urb, ar->udev, usb_sndintpipe(ar->udev,
                AR9170_USB_EP_CMD), cmd, cmd->hdr.len + 4,
@@ -613,6 +620,12 @@ int __carl9170_exec_cmd(struct ar9170 *ar, struct carl9170_cmd *cmd,
        usb_free_urb(urb);
 
        return carl9170_usb_submit_cmd_urb(ar);
+
+err_free:
+       if (free_buf)
+               kfree(cmd);
+
+       return err;
 }
 
 int carl9170_exec_cmd(struct ar9170 *ar, const enum carl9170_cmd_oids cmd,
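
With the early returns replaced by jumps to err_free, a caller that passed free_buf no longer leaks the command buffer when the device is uninitialized, the length check fails, or the URB allocation fails. A standalone toy version of that single-exit cleanup idiom (struct cmd, submit() and the length limit are invented for the sketch):

/* Toy version of the err_free pattern: every failure before the buffer
 * changes hands jumps to one label that frees it (when asked to). */
#include <stdlib.h>

struct cmd { int len; };

static int submit(struct cmd *cmd) { (void)cmd; return 0; /* pretend ok */ }

static int exec_cmd(struct cmd *cmd, int free_buf)
{
	int err = 0;

	if (!cmd) {
		err = -22;		/* -EINVAL stand-in */
		goto err_free;
	}

	if (cmd->len > 64) {
		err = -22;
		goto err_free;
	}

	return submit(cmd);		/* ownership passed on the success path */

err_free:
	if (free_buf)
		free(cmd);
	return err;
}

int main(void)
{
	struct cmd *c = calloc(1, sizeof(*c));

	if (!c)
		return 1;
	c->len = 128;			/* too long: exec_cmd() frees it */
	exec_cmd(c, 1);
	return 0;
}
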
index dfec549..e0f2d12 100644 (file)
@@ -2964,7 +2964,7 @@ static int b43_nphy_rev2_cal_rx_iq(struct b43_wldev *dev,
                                        (2 - i));
                }
 
-               for (j = 0; i < 4; j++) {
+               for (j = 0; j < 4; j++) {
                        if (j < 3) {
                                cur_lna = lna[j];
                                cur_hpf1 = hpf1[j];
index db57aea..2b078a9 100644 (file)
@@ -1227,7 +1227,8 @@ static int iwlagn_tx_status_reply_compressed_ba(struct iwl_priv *priv,
        struct ieee80211_tx_info *info;
 
        if (unlikely(!agg->wait_for_ba))  {
-               IWL_ERR(priv, "Received BA when not expected\n");
+               if (unlikely(ba_resp->bitmap))
+                       IWL_ERR(priv, "Received BA when not expected\n");
                return -EINVAL;
        }
 
index 4fe2468..58b4f93 100644 (file)
@@ -1,6 +1,8 @@
 wl1251-objs            = main.o event.o tx.o rx.o ps.o cmd.o \
                          acx.o boot.o init.o debugfs.o io.o
+wl1251_spi-objs                += spi.o
+wl1251_sdio-objs       += sdio.o
 
-obj-$(CONFIG_WL1251)   += wl1251.o
-obj-$(CONFIG_WL1251_SPI)       += spi.o
-obj-$(CONFIG_WL1251_SDIO)      += sdio.o
+obj-$(CONFIG_WL1251)           += wl1251.o
+obj-$(CONFIG_WL1251_SPI)       += wl1251_spi.o
+obj-$(CONFIG_WL1251_SDIO)      += wl1251_sdio.o
index 01f0306..895136f 100644 (file)
@@ -212,8 +212,6 @@ static long proc_bus_pci_ioctl(struct file *file, unsigned int cmd,
 #endif /* HAVE_PCI_MMAP */
        int ret = 0;
 
-       lock_kernel();
-
        switch (cmd) {
        case PCIIOC_CONTROLLER:
                ret = pci_domain_nr(dev->bus);
@@ -242,7 +240,6 @@ static long proc_bus_pci_ioctl(struct file *file, unsigned int cmd,
                break;
        };
 
-       unlock_kernel();
        return ret;
 }
 
index f540ff9..e61db9d 100644 (file)
@@ -29,7 +29,6 @@
 #include <linux/init.h>
 #include <linux/io.h>
 #include <linux/gpio.h>
-#include <linux/interrupt.h>
 #include <asm/intel_scu_ipc.h>
 #include <linux/device.h>
 #include <linux/intel_pmic_gpio.h>
index 3222fa3..0f4a53b 100644 (file)
@@ -192,7 +192,7 @@ static int rio_match_bus(struct device *dev, struct device_driver *drv)
       out:return 0;
 }
 
-static struct device rio_bus = {
+struct device rio_bus = {
        .init_name = "rapidio",
 };
 
index 8070e07..1eb82c4 100644 (file)
@@ -48,7 +48,7 @@ DEFINE_SPINLOCK(rio_global_list_lock);
 static int next_destid = 0;
 static int next_switchid = 0;
 static int next_net = 0;
-static int next_comptag;
+static int next_comptag = 1;
 
 static struct timer_list rio_enum_timer =
 TIMER_INITIALIZER(rio_enum_timeout, 0, 0);
@@ -121,27 +121,6 @@ static int rio_clear_locks(struct rio_mport *port)
        u32 result;
        int ret = 0;
 
-       /* Assign component tag to all devices */
-       next_comptag = 1;
-       rio_local_write_config_32(port, RIO_COMPONENT_TAG_CSR, next_comptag++);
-
-       list_for_each_entry(rdev, &rio_devices, global_list) {
-               /* Mark device as discovered */
-               rio_read_config_32(rdev,
-                                  rdev->phys_efptr + RIO_PORT_GEN_CTL_CSR,
-                                  &result);
-               rio_write_config_32(rdev,
-                                   rdev->phys_efptr + RIO_PORT_GEN_CTL_CSR,
-                                   result | RIO_PORT_GEN_DISCOVERED);
-
-               rio_write_config_32(rdev, RIO_COMPONENT_TAG_CSR, next_comptag);
-               rdev->comp_tag = next_comptag++;
-               if (next_comptag >= 0x10000) {
-                       pr_err("RIO: Component Tag Counter Overflow\n");
-                       break;
-               }
-       }
-
        /* Release host device id locks */
        rio_local_write_config_32(port, RIO_HOST_DID_LOCK_CSR,
                                  port->host_deviceid);
@@ -162,6 +141,15 @@ static int rio_clear_locks(struct rio_mport *port)
                               rdev->vid, rdev->did);
                        ret = -EINVAL;
                }
+
+               /* Mark device as discovered and enable master */
+               rio_read_config_32(rdev,
+                                  rdev->phys_efptr + RIO_PORT_GEN_CTL_CSR,
+                                  &result);
+               result |= RIO_PORT_GEN_DISCOVERED | RIO_PORT_GEN_MASTER;
+               rio_write_config_32(rdev,
+                                   rdev->phys_efptr + RIO_PORT_GEN_CTL_CSR,
+                                   result);
        }
 
        return ret;
@@ -420,11 +408,27 @@ static struct rio_dev __devinit *rio_setup_device(struct rio_net *net,
                                                hopcount, RIO_EFB_ERR_MGMNT);
        }
 
+       if (rdev->pef & (RIO_PEF_SWITCH | RIO_PEF_MULTIPORT)) {
+               rio_mport_read_config_32(port, destid, hopcount,
+                                        RIO_SWP_INFO_CAR, &rdev->swpinfo);
+       }
+
        rio_mport_read_config_32(port, destid, hopcount, RIO_SRC_OPS_CAR,
                                 &rdev->src_ops);
        rio_mport_read_config_32(port, destid, hopcount, RIO_DST_OPS_CAR,
                                 &rdev->dst_ops);
 
+       if (do_enum) {
+               /* Assign component tag to device */
+               if (next_comptag >= 0x10000) {
+                       pr_err("RIO: Component Tag Counter Overflow\n");
+                       goto cleanup;
+               }
+               rio_mport_write_config_32(port, destid, hopcount,
+                                         RIO_COMPONENT_TAG_CSR, next_comptag);
+               rdev->comp_tag = next_comptag++;
+       }
+
        if (rio_device_has_destid(port, rdev->src_ops, rdev->dst_ops)) {
                if (do_enum) {
                        rio_set_device_id(port, destid, hopcount, next_destid);
@@ -439,9 +443,10 @@ static struct rio_dev __devinit *rio_setup_device(struct rio_net *net,
 
        /* If a PE has both switch and other functions, show it as a switch */
        if (rio_is_switch(rdev)) {
-               rio_mport_read_config_32(port, destid, hopcount,
-                                        RIO_SWP_INFO_CAR, &rdev->swpinfo);
-               rswitch = kzalloc(sizeof(struct rio_switch), GFP_KERNEL);
+               rswitch = kzalloc(sizeof(*rswitch) +
+                                 RIO_GET_TOTAL_PORTS(rdev->swpinfo) *
+                                 sizeof(rswitch->nextdev[0]),
+                                 GFP_KERNEL);
                if (!rswitch)
                        goto cleanup;
                rswitch->switchid = next_switchid;
@@ -458,6 +463,7 @@ static struct rio_dev __devinit *rio_setup_device(struct rio_net *net,
                                rdid++)
                        rswitch->route_table[rdid] = RIO_INVALID_ROUTE;
                rdev->rswitch = rswitch;
+               rswitch->rdev = rdev;
                dev_set_name(&rdev->dev, "%02x:s:%04x", rdev->net->id,
                             rdev->rswitch->switchid);
                rio_switch_init(rdev, do_enum);
@@ -478,6 +484,7 @@ static struct rio_dev __devinit *rio_setup_device(struct rio_net *net,
        }
 
        rdev->dev.bus = &rio_bus_type;
+       rdev->dev.parent = &rio_bus;
 
        device_initialize(&rdev->dev);
        rdev->dev.release = rio_release_dev;
@@ -717,87 +724,54 @@ static u16 rio_get_host_deviceid_lock(struct rio_mport *port, u8 hopcount)
        return (u16) (result & 0xffff);
 }
 
-/**
- * rio_get_swpinfo_inport- Gets the ingress port number
- * @mport: Master port to send transaction
- * @destid: Destination ID associated with the switch
- * @hopcount: Number of hops to the device
- *
- * Returns port number being used to access the switch device.
- */
-static u8
-rio_get_swpinfo_inport(struct rio_mport *mport, u16 destid, u8 hopcount)
-{
-       u32 result;
-
-       rio_mport_read_config_32(mport, destid, hopcount, RIO_SWP_INFO_CAR,
-                                &result);
-
-       return (u8) (result & 0xff);
-}
-
-/**
- * rio_get_swpinfo_tports- Gets total number of ports on the switch
- * @mport: Master port to send transaction
- * @destid: Destination ID associated with the switch
- * @hopcount: Number of hops to the device
- *
- * Returns total numbers of ports implemented by the switch device.
- */
-static u8 rio_get_swpinfo_tports(struct rio_mport *mport, u16 destid,
-                                u8 hopcount)
-{
-       u32 result;
-
-       rio_mport_read_config_32(mport, destid, hopcount, RIO_SWP_INFO_CAR,
-                                &result);
-
-       return RIO_GET_TOTAL_PORTS(result);
-}
-
-/**
- * rio_net_add_mport- Add a master port to a RIO network
- * @net: RIO network
- * @port: Master port to add
- *
- * Adds a master port to the network list of associated master
- * ports..
- */
-static void rio_net_add_mport(struct rio_net *net, struct rio_mport *port)
-{
-       spin_lock(&rio_global_list_lock);
-       list_add_tail(&port->nnode, &net->mports);
-       spin_unlock(&rio_global_list_lock);
-}
-
 /**
  * rio_enum_peer- Recursively enumerate a RIO network through a master port
  * @net: RIO network being enumerated
  * @port: Master port to send transactions
  * @hopcount: Number of hops into the network
+ * @prev: Previous RIO device connected to the enumerated one
+ * @prev_port: Port on previous RIO device
  *
  * Recursively enumerates a RIO network.  Transactions are sent via the
  * master port passed in @port.
  */
 static int __devinit rio_enum_peer(struct rio_net *net, struct rio_mport *port,
-                        u8 hopcount)
+                        u8 hopcount, struct rio_dev *prev, int prev_port)
 {
        int port_num;
-       int num_ports;
        int cur_destid;
        int sw_destid;
        int sw_inport;
        struct rio_dev *rdev;
        u16 destid;
+       u32 regval;
        int tmp;
 
+       if (rio_mport_chk_dev_access(port,
+                       RIO_ANY_DESTID(port->sys_size), hopcount)) {
+               pr_debug("RIO: device access check failed\n");
+               return -1;
+       }
+
        if (rio_get_host_deviceid_lock(port, hopcount) == port->host_deviceid) {
                pr_debug("RIO: PE already discovered by this host\n");
                /*
                 * Already discovered by this host. Add it as another
-                * master port for the current network.
+                * link to the existing device.
                 */
-               rio_net_add_mport(net, port);
+               rio_mport_read_config_32(port, RIO_ANY_DESTID(port->sys_size),
+                               hopcount, RIO_COMPONENT_TAG_CSR, &regval);
+
+               if (regval) {
+                       rdev = rio_get_comptag((regval & 0xffff), NULL);
+
+                       if (rdev && prev && rio_is_switch(prev)) {
+                               pr_debug("RIO: redundant path to %s\n",
+                                        rio_name(rdev));
+                               prev->rswitch->nextdev[prev_port] = rdev;
+                       }
+               }
+
                return 0;
        }
 
@@ -828,13 +802,15 @@ static int __devinit rio_enum_peer(struct rio_net *net, struct rio_mport *port,
        if (rdev) {
                /* Add device to the global and bus/net specific list. */
                list_add_tail(&rdev->net_list, &net->devices);
+               rdev->prev = prev;
+               if (prev && rio_is_switch(prev))
+                       prev->rswitch->nextdev[prev_port] = rdev;
        } else
                return -1;
 
        if (rio_is_switch(rdev)) {
                next_switchid++;
-               sw_inport = rio_get_swpinfo_inport(port,
-                               RIO_ANY_DESTID(port->sys_size), hopcount);
+               sw_inport = RIO_GET_PORT_NUM(rdev->swpinfo);
                rio_route_add_entry(port, rdev->rswitch, RIO_GLOBAL_TABLE,
                                    port->host_deviceid, sw_inport, 0);
                rdev->rswitch->route_table[port->host_deviceid] = sw_inport;
@@ -847,14 +823,14 @@ static int __devinit rio_enum_peer(struct rio_net *net, struct rio_mport *port,
                        rdev->rswitch->route_table[destid] = sw_inport;
                }
 
-               num_ports =
-                   rio_get_swpinfo_tports(port, RIO_ANY_DESTID(port->sys_size),
-                                               hopcount);
                pr_debug(
                    "RIO: found %s (vid %4.4x did %4.4x) with %d ports\n",
-                   rio_name(rdev), rdev->vid, rdev->did, num_ports);
+                   rio_name(rdev), rdev->vid, rdev->did,
+                   RIO_GET_TOTAL_PORTS(rdev->swpinfo));
                sw_destid = next_destid;
-               for (port_num = 0; port_num < num_ports; port_num++) {
+               for (port_num = 0;
+                    port_num < RIO_GET_TOTAL_PORTS(rdev->swpinfo);
+                    port_num++) {
                        /* Enable Input Output Port (transmitter/receiver) */
                        rio_enable_rx_tx_port(port, 0,
                                              RIO_ANY_DESTID(port->sys_size),
@@ -879,7 +855,8 @@ static int __devinit rio_enum_peer(struct rio_net *net, struct rio_mport *port,
                                                RIO_ANY_DESTID(port->sys_size),
                                                port_num, 0);
 
-                               if (rio_enum_peer(net, port, hopcount + 1) < 0)
+                               if (rio_enum_peer(net, port, hopcount + 1,
+                                                 rdev, port_num) < 0)
                                        return -1;
 
                                /* Update routing tables */
@@ -945,10 +922,11 @@ static int __devinit rio_enum_peer(struct rio_net *net, struct rio_mport *port,
  */
 static int rio_enum_complete(struct rio_mport *port)
 {
-       u32 tag_csr;
+       u32 regval;
 
-       rio_local_read_config_32(port, RIO_COMPONENT_TAG_CSR, &tag_csr);
-       return (tag_csr & 0xffff) ? 1 : 0;
+       rio_local_read_config_32(port, port->phys_efptr + RIO_PORT_GEN_CTL_CSR,
+                                &regval);
+       return (regval & RIO_PORT_GEN_MASTER) ? 1 : 0;
 }
 
 /**
@@ -966,7 +944,6 @@ rio_disc_peer(struct rio_net *net, struct rio_mport *port, u16 destid,
              u8 hopcount)
 {
        u8 port_num, route_port;
-       int num_ports;
        struct rio_dev *rdev;
        u16 ndestid;
 
@@ -983,13 +960,14 @@ rio_disc_peer(struct rio_net *net, struct rio_mport *port, u16 destid,
                /* Associated destid is how we accessed this switch */
                rdev->rswitch->destid = destid;
 
-               num_ports = rio_get_swpinfo_tports(port, destid, hopcount);
                pr_debug(
                    "RIO: found %s (vid %4.4x did %4.4x) with %d ports\n",
-                   rio_name(rdev), rdev->vid, rdev->did, num_ports);
-               for (port_num = 0; port_num < num_ports; port_num++) {
-                       if (rio_get_swpinfo_inport(port, destid, hopcount) ==
-                           port_num)
+                   rio_name(rdev), rdev->vid, rdev->did,
+                   RIO_GET_TOTAL_PORTS(rdev->swpinfo));
+               for (port_num = 0;
+                    port_num < RIO_GET_TOTAL_PORTS(rdev->swpinfo);
+                    port_num++) {
+                       if (RIO_GET_PORT_NUM(rdev->swpinfo) == port_num)
                                continue;
 
                        if (rio_sport_is_active
@@ -1011,6 +989,8 @@ rio_disc_peer(struct rio_net *net, struct rio_mport *port, u16 destid,
                                                break;
                                }
 
+                               if (ndestid == RIO_ANY_DESTID(port->sys_size))
+                                       continue;
                                rio_unlock_device(port, destid, hopcount);
                                if (rio_disc_peer
                                    (net, port, ndestid, hopcount + 1) < 0)
@@ -1108,8 +1088,7 @@ static void rio_update_route_tables(struct rio_mport *port)
                                if (rswitch->destid == destid)
                                        continue;
 
-                               sport = rio_get_swpinfo_inport(port,
-                                               rswitch->destid, rswitch->hopcount);
+                               sport = RIO_GET_PORT_NUM(rswitch->rdev->swpinfo);
 
                                if (rswitch->add_entry) {
                                        rio_route_add_entry(port, rswitch,
@@ -1184,7 +1163,11 @@ int __devinit rio_enum_mport(struct rio_mport *mport)
                /* Enable Input Output Port (transmitter/receiver) */
                rio_enable_rx_tx_port(mport, 1, 0, 0, 0);
 
-               if (rio_enum_peer(net, mport, 0) < 0) {
+               /* Set component tag for host */
+               rio_local_write_config_32(mport, RIO_COMPONENT_TAG_CSR,
+                                         next_comptag++);
+
+               if (rio_enum_peer(net, mport, 0, NULL, 0) < 0) {
                        /* A higher priority host won enumeration, bail. */
                        printk(KERN_INFO
                               "RIO: master port %d device has lost enumeration to a remote host\n",
index 00b4756..137ed93 100644 (file)
@@ -40,9 +40,6 @@ static ssize_t routes_show(struct device *dev, struct device_attribute *attr, ch
        char *str = buf;
        int i;
 
-       if (!rdev->rswitch)
-               goto out;
-
        for (i = 0; i < RIO_MAX_ROUTE_ENTRIES(rdev->net->hport->sys_size);
                        i++) {
                if (rdev->rswitch->route_table[i] == RIO_INVALID_ROUTE)
@@ -52,7 +49,6 @@ static ssize_t routes_show(struct device *dev, struct device_attribute *attr, ch
                            rdev->rswitch->route_table[i]);
        }
 
-      out:
        return (str - buf);
 }
 
@@ -63,10 +59,11 @@ struct device_attribute rio_dev_attrs[] = {
        __ATTR_RO(asm_did),
        __ATTR_RO(asm_vid),
        __ATTR_RO(asm_rev),
-       __ATTR_RO(routes),
        __ATTR_NULL,
 };
 
+static DEVICE_ATTR(routes, S_IRUGO, routes_show, NULL);
+
 static ssize_t
 rio_read_config(struct file *filp, struct kobject *kobj,
                struct bin_attribute *bin_attr,
@@ -218,7 +215,17 @@ int rio_create_sysfs_dev_files(struct rio_dev *rdev)
 {
        int err = 0;
 
-       err = sysfs_create_bin_file(&rdev->dev.kobj, &rio_config_attr);
+       err = device_create_bin_file(&rdev->dev, &rio_config_attr);
+
+       if (!err && rdev->rswitch) {
+               err = device_create_file(&rdev->dev, &dev_attr_routes);
+               if (!err && rdev->rswitch->sw_sysfs)
+                       err = rdev->rswitch->sw_sysfs(rdev, RIO_SW_SYSFS_CREATE);
+       }
+
+       if (err)
+               pr_warning("RIO: Failed to create attribute file(s) for %s\n",
+                          rio_name(rdev));
 
        return err;
 }
@@ -231,5 +238,10 @@ int rio_create_sysfs_dev_files(struct rio_dev *rdev)
  */
 void rio_remove_sysfs_dev_files(struct rio_dev *rdev)
 {
-       sysfs_remove_bin_file(&rdev->dev.kobj, &rio_config_attr);
+       device_remove_bin_file(&rdev->dev, &rio_config_attr);
+       if (rdev->rswitch) {
+               device_remove_file(&rdev->dev, &dev_attr_routes);
+               if (rdev->rswitch->sw_sysfs)
+                       rdev->rswitch->sw_sysfs(rdev, RIO_SW_SYSFS_REMOVE);
+       }
 }
index 74e9d22..68cf0c9 100644 (file)
@@ -443,7 +443,7 @@ rio_mport_get_physefb(struct rio_mport *port, int local,
  * @from is not %NULL, searches continue from next device on the global
  * list.
  */
-static struct rio_dev *rio_get_comptag(u32 comp_tag, struct rio_dev *from)
+struct rio_dev *rio_get_comptag(u32 comp_tag, struct rio_dev *from)
 {
        struct list_head *n;
        struct rio_dev *rdev;
@@ -494,6 +494,232 @@ int rio_set_port_lockout(struct rio_dev *rdev, u32 pnum, int lock)
        return 0;
 }
 
+/**
+ * rio_chk_dev_route - Validate route to the specified device.
+ * @rdev:  RIO device that failed to respond
+ * @nrdev: Last active device on the route to rdev
+ * @npnum: nrdev's port number on the route to rdev
+ *
+ * Follows a route to the specified RIO device to determine the last available
+ * device (and corresponding RIO port) on the route.
+ */
+static int
+rio_chk_dev_route(struct rio_dev *rdev, struct rio_dev **nrdev, int *npnum)
+{
+       u32 result;
+       int p_port, dstid, rc = -EIO;
+       struct rio_dev *prev = NULL;
+
+       /* Find switch with failed RIO link */
+       while (rdev->prev && (rdev->prev->pef & RIO_PEF_SWITCH)) {
+               if (!rio_read_config_32(rdev->prev, RIO_DEV_ID_CAR, &result)) {
+                       prev = rdev->prev;
+                       break;
+               }
+               rdev = rdev->prev;
+       }
+
+       if (prev == NULL)
+               goto err_out;
+
+       dstid = (rdev->pef & RIO_PEF_SWITCH) ?
+                       rdev->rswitch->destid : rdev->destid;
+       p_port = prev->rswitch->route_table[dstid];
+
+       if (p_port != RIO_INVALID_ROUTE) {
+               pr_debug("RIO: link failed on [%s]-P%d\n",
+                        rio_name(prev), p_port);
+               *nrdev = prev;
+               *npnum = p_port;
+               rc = 0;
+       } else
+               pr_debug("RIO: failed to trace route to %s\n", rio_name(rdev));
+err_out:
+       return rc;
+}
+
+/**
+ * rio_mport_chk_dev_access - Validate access to the specified device.
+ * @mport: Master port to send transactions
+ * @destid: Device destination ID in network
+ * @hopcount: Number of hops into the network
+ */
+int
+rio_mport_chk_dev_access(struct rio_mport *mport, u16 destid, u8 hopcount)
+{
+       int i = 0;
+       u32 tmp;
+
+       while (rio_mport_read_config_32(mport, destid, hopcount,
+                                       RIO_DEV_ID_CAR, &tmp)) {
+               i++;
+               if (i == RIO_MAX_CHK_RETRY)
+                       return -EIO;
+               mdelay(1);
+       }
+
+       return 0;
+}
+
+/**
+ * rio_chk_dev_access - Validate access to the specified device.
+ * @rdev: Pointer to RIO device control structure
+ */
+static int rio_chk_dev_access(struct rio_dev *rdev)
+{
+       u8 hopcount = 0xff;
+       u16 destid = rdev->destid;
+
+       if (rdev->rswitch) {
+               destid = rdev->rswitch->destid;
+               hopcount = rdev->rswitch->hopcount;
+       }
+
+       return rio_mport_chk_dev_access(rdev->net->hport, destid, hopcount);
+}
+
+/**
+ * rio_get_input_status - Sends a Link-Request/Input-Status control symbol and
+ *                        returns link-response (if requested).
+ * @rdev: RIO device to issue Input-status command
+ * @pnum: Device port number to issue the command
+ * @lnkresp: Response from a link partner
+ */
+static int
+rio_get_input_status(struct rio_dev *rdev, int pnum, u32 *lnkresp)
+{
+       struct rio_mport *mport = rdev->net->hport;
+       u16 destid = rdev->rswitch->destid;
+       u8 hopcount = rdev->rswitch->hopcount;
+       u32 regval;
+       int checkcount;
+
+       if (lnkresp) {
+               /* Read from link maintenance response register
+                * to clear valid bit */
+               rio_mport_read_config_32(mport, destid, hopcount,
+                       rdev->phys_efptr + RIO_PORT_N_MNT_RSP_CSR(pnum),
+                       &regval);
+               udelay(50);
+       }
+
+       /* Issue Input-status command */
+       rio_mport_write_config_32(mport, destid, hopcount,
+               rdev->phys_efptr + RIO_PORT_N_MNT_REQ_CSR(pnum),
+               RIO_MNT_REQ_CMD_IS);
+
+       /* Exit if the response is not expected */
+       if (lnkresp == NULL)
+               return 0;
+
+       checkcount = 3;
+       while (checkcount--) {
+               udelay(50);
+               rio_mport_read_config_32(mport, destid, hopcount,
+                       rdev->phys_efptr + RIO_PORT_N_MNT_RSP_CSR(pnum),
+                       &regval);
+               if (regval & RIO_PORT_N_MNT_RSP_RVAL) {
+                       *lnkresp = regval;
+                       return 0;
+               }
+       }
+
+       return -EIO;
+}
+
+/**
+ * rio_clr_err_stopped - Clears port Error-stopped states.
+ * @rdev: Pointer to RIO device control structure
+ * @pnum: Switch port number to clear errors
+ * @err_status: port error status (if 0 reads register from device)
+ */
+static int rio_clr_err_stopped(struct rio_dev *rdev, u32 pnum, u32 err_status)
+{
+       struct rio_mport *mport = rdev->net->hport;
+       u16 destid = rdev->rswitch->destid;
+       u8 hopcount = rdev->rswitch->hopcount;
+       struct rio_dev *nextdev = rdev->rswitch->nextdev[pnum];
+       u32 regval;
+       u32 far_ackid, far_linkstat, near_ackid;
+
+       if (err_status == 0)
+               rio_mport_read_config_32(mport, destid, hopcount,
+                       rdev->phys_efptr + RIO_PORT_N_ERR_STS_CSR(pnum),
+                       &err_status);
+
+       if (err_status & RIO_PORT_N_ERR_STS_PW_OUT_ES) {
+               pr_debug("RIO_EM: servicing Output Error-Stopped state\n");
+               /*
+                * Send a Link-Request/Input-Status control symbol
+                */
+               if (rio_get_input_status(rdev, pnum, &regval)) {
+                       pr_debug("RIO_EM: Input-status response timeout\n");
+                       goto rd_err;
+               }
+
+               pr_debug("RIO_EM: SP%d Input-status response=0x%08x\n",
+                        pnum, regval);
+               far_ackid = (regval & RIO_PORT_N_MNT_RSP_ASTAT) >> 5;
+               far_linkstat = regval & RIO_PORT_N_MNT_RSP_LSTAT;
+               rio_mport_read_config_32(mport, destid, hopcount,
+                       rdev->phys_efptr + RIO_PORT_N_ACK_STS_CSR(pnum),
+                       &regval);
+               pr_debug("RIO_EM: SP%d_ACK_STS_CSR=0x%08x\n", pnum, regval);
+               near_ackid = (regval & RIO_PORT_N_ACK_INBOUND) >> 24;
+               pr_debug("RIO_EM: SP%d far_ackID=0x%02x far_linkstat=0x%02x" \
+                        " near_ackID=0x%02x\n",
+                       pnum, far_ackid, far_linkstat, near_ackid);
+
+               /*
+                * If required, synchronize ackIDs of near and
+                * far sides.
+                */
+               if ((far_ackid != ((regval & RIO_PORT_N_ACK_OUTSTAND) >> 8)) ||
+                   (far_ackid != (regval & RIO_PORT_N_ACK_OUTBOUND))) {
+                       /* Align near outstanding/outbound ackIDs with
+                        * far inbound.
+                        */
+                       rio_mport_write_config_32(mport, destid,
+                               hopcount, rdev->phys_efptr +
+                                       RIO_PORT_N_ACK_STS_CSR(pnum),
+                               (near_ackid << 24) |
+                                       (far_ackid << 8) | far_ackid);
+                       /* Align far outstanding/outbound ackIDs with
+                        * near inbound.
+                        */
+                       far_ackid++;
+                       if (nextdev)
+                               rio_write_config_32(nextdev,
+                                       nextdev->phys_efptr +
+                                       RIO_PORT_N_ACK_STS_CSR(RIO_GET_PORT_NUM(nextdev->swpinfo)),
+                                       (far_ackid << 24) |
+                                       (near_ackid << 8) | near_ackid);
+                       else
+                               pr_debug("RIO_EM: Invalid nextdev pointer (NULL)\n");
+               }
+rd_err:
+               rio_mport_read_config_32(mport, destid, hopcount,
+                       rdev->phys_efptr + RIO_PORT_N_ERR_STS_CSR(pnum),
+                       &err_status);
+               pr_debug("RIO_EM: SP%d_ERR_STS_CSR=0x%08x\n", pnum, err_status);
+       }
+
+       if ((err_status & RIO_PORT_N_ERR_STS_PW_INP_ES) && nextdev) {
+               pr_debug("RIO_EM: servicing Input Error-Stopped state\n");
+               rio_get_input_status(nextdev,
+                                    RIO_GET_PORT_NUM(nextdev->swpinfo), NULL);
+               udelay(50);
+
+               rio_mport_read_config_32(mport, destid, hopcount,
+                       rdev->phys_efptr + RIO_PORT_N_ERR_STS_CSR(pnum),
+                       &err_status);
+               pr_debug("RIO_EM: SP%d_ERR_STS_CSR=0x%08x\n", pnum, err_status);
+       }
+
+       return (err_status & (RIO_PORT_N_ERR_STS_PW_OUT_ES |
+                             RIO_PORT_N_ERR_STS_PW_INP_ES)) ? 1 : 0;
+}
+
 /**
  * rio_inb_pwrite_handler - process inbound port-write message
  * @pw_msg: pointer to inbound port-write message
@@ -507,13 +733,13 @@ int rio_inb_pwrite_handler(union rio_pw_msg *pw_msg)
        struct rio_mport *mport;
        u8 hopcount;
        u16 destid;
-       u32 err_status;
+       u32 err_status, em_perrdet, em_ltlerrdet;
        int rc, portnum;
 
        rdev = rio_get_comptag(pw_msg->em.comptag, NULL);
        if (rdev == NULL) {
-               /* Someting bad here (probably enumeration error) */
-               pr_err("RIO: %s No matching device for CTag 0x%08x\n",
+               /* Device removed or enumeration error */
+               pr_debug("RIO: %s No matching device for CTag 0x%08x\n",
                        __func__, pw_msg->em.comptag);
                return -EIO;
        }
@@ -524,12 +750,11 @@ int rio_inb_pwrite_handler(union rio_pw_msg *pw_msg)
        {
        u32 i;
        for (i = 0; i < RIO_PW_MSG_SIZE/sizeof(u32);) {
-                       pr_debug("0x%02x: %08x %08x %08x %08x",
+                       pr_debug("0x%02x: %08x %08x %08x %08x\n",
                                 i*4, pw_msg->raw[i], pw_msg->raw[i + 1],
                                 pw_msg->raw[i + 2], pw_msg->raw[i + 3]);
                        i += 4;
        }
-       pr_debug("\n");
        }
 #endif
 
@@ -545,6 +770,26 @@ int rio_inb_pwrite_handler(union rio_pw_msg *pw_msg)
                        return 0;
        }
 
+       portnum = pw_msg->em.is_port & 0xFF;
+
+       /* Check if device and route to it are functional:
+        * Sometimes devices may send PW message(s) just before being
+        * powered down (or link being lost).
+        */
+       if (rio_chk_dev_access(rdev)) {
+               pr_debug("RIO: device access failed - get link partner\n");
+               /* Scan route to the device and identify failed link.
+                * This will replace device and port reported in PW message.
+                * PW message should not be used after this point.
+                */
+               if (rio_chk_dev_route(rdev, &rdev, &portnum)) {
+                       pr_err("RIO: Route trace for %s failed\n",
+                               rio_name(rdev));
+                       return -EIO;
+               }
+               pw_msg = NULL;
+       }
+
        /* For End-point devices processing stops here */
        if (!(rdev->pef & RIO_PEF_SWITCH))
                return 0;
@@ -562,9 +807,6 @@ int rio_inb_pwrite_handler(union rio_pw_msg *pw_msg)
        /*
         * Process the port-write notification from switch
         */
-
-       portnum = pw_msg->em.is_port & 0xFF;
-
        if (rdev->rswitch->em_handle)
                rdev->rswitch->em_handle(rdev, portnum);
 
@@ -573,29 +815,28 @@ int rio_inb_pwrite_handler(union rio_pw_msg *pw_msg)
                        &err_status);
        pr_debug("RIO_PW: SP%d_ERR_STS_CSR=0x%08x\n", portnum, err_status);
 
-       if (pw_msg->em.errdetect) {
-               pr_debug("RIO_PW: RIO_EM_P%d_ERR_DETECT=0x%08x\n",
-                        portnum, pw_msg->em.errdetect);
-               /* Clear EM Port N Error Detect CSR */
-               rio_mport_write_config_32(mport, destid, hopcount,
-                       rdev->em_efptr + RIO_EM_PN_ERR_DETECT(portnum), 0);
-       }
+       if (err_status & RIO_PORT_N_ERR_STS_PORT_OK) {
 
-       if (pw_msg->em.ltlerrdet) {
-               pr_debug("RIO_PW: RIO_EM_LTL_ERR_DETECT=0x%08x\n",
-                        pw_msg->em.ltlerrdet);
-               /* Clear EM L/T Layer Error Detect CSR */
-               rio_mport_write_config_32(mport, destid, hopcount,
-                       rdev->em_efptr + RIO_EM_LTL_ERR_DETECT, 0);
-       }
+               if (!(rdev->rswitch->port_ok & (1 << portnum))) {
+                       rdev->rswitch->port_ok |= (1 << portnum);
+                       rio_set_port_lockout(rdev, portnum, 0);
+                       /* Schedule Insertion Service */
+                       pr_debug("RIO_PW: Device Insertion on [%s]-P%d\n",
+                              rio_name(rdev), portnum);
+               }
 
-       /* Clear Port Errors */
-       rio_mport_write_config_32(mport, destid, hopcount,
-                       rdev->phys_efptr + RIO_PORT_N_ERR_STS_CSR(portnum),
-                       err_status & RIO_PORT_N_ERR_STS_CLR_MASK);
+               /* Clear error-stopped states (if reported).
+                * Depending on the link partner state, two attempts
+                * may be needed for successful recovery.
+                */
+               if (err_status & (RIO_PORT_N_ERR_STS_PW_OUT_ES |
+                                 RIO_PORT_N_ERR_STS_PW_INP_ES)) {
+                       if (rio_clr_err_stopped(rdev, portnum, err_status))
+                               rio_clr_err_stopped(rdev, portnum, 0);
+               }
+       }  else { /* if (err_status & RIO_PORT_N_ERR_STS_PORT_UNINIT) */
 
-       if (rdev->rswitch->port_ok & (1 << portnum)) {
-               if (err_status & RIO_PORT_N_ERR_STS_PORT_UNINIT) {
+               if (rdev->rswitch->port_ok & (1 << portnum)) {
                        rdev->rswitch->port_ok &= ~(1 << portnum);
                        rio_set_port_lockout(rdev, portnum, 1);
 
@@ -608,21 +849,32 @@ int rio_inb_pwrite_handler(union rio_pw_msg *pw_msg)
                        pr_debug("RIO_PW: Device Extraction on [%s]-P%d\n",
                               rio_name(rdev), portnum);
                }
-       } else {
-               if (err_status & RIO_PORT_N_ERR_STS_PORT_OK) {
-                       rdev->rswitch->port_ok |= (1 << portnum);
-                       rio_set_port_lockout(rdev, portnum, 0);
+       }
 
-                       /* Schedule Insertion Service */
-                       pr_debug("RIO_PW: Device Insertion on [%s]-P%d\n",
-                              rio_name(rdev), portnum);
-               }
+       rio_mport_read_config_32(mport, destid, hopcount,
+               rdev->em_efptr + RIO_EM_PN_ERR_DETECT(portnum), &em_perrdet);
+       if (em_perrdet) {
+               pr_debug("RIO_PW: RIO_EM_P%d_ERR_DETECT=0x%08x\n",
+                        portnum, em_perrdet);
+               /* Clear EM Port N Error Detect CSR */
+               rio_mport_write_config_32(mport, destid, hopcount,
+                       rdev->em_efptr + RIO_EM_PN_ERR_DETECT(portnum), 0);
+       }
+
+       rio_mport_read_config_32(mport, destid, hopcount,
+               rdev->em_efptr + RIO_EM_LTL_ERR_DETECT, &em_ltlerrdet);
+       if (em_ltlerrdet) {
+               pr_debug("RIO_PW: RIO_EM_LTL_ERR_DETECT=0x%08x\n",
+                        em_ltlerrdet);
+               /* Clear EM L/T Layer Error Detect CSR */
+               rio_mport_write_config_32(mport, destid, hopcount,
+                       rdev->em_efptr + RIO_EM_LTL_ERR_DETECT, 0);
        }
 
-       /* Clear Port-Write Pending bit */
+       /* Clear remaining error bits and Port-Write Pending bit */
        rio_mport_write_config_32(mport, destid, hopcount,
                        rdev->phys_efptr + RIO_PORT_N_ERR_STS_CSR(portnum),
-                       RIO_PORT_N_ERR_STS_PW_PEND);
+                       err_status);
 
        return 0;
 }
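
The port-write handler now verifies that the reporting device is still reachable before trusting the message: rio_chk_dev_access() resolves the destid/hopcount, and rio_mport_chk_dev_access() gives up only after RIO_MAX_CHK_RETRY failed maintenance reads of the Device ID CAR, with a 1 ms delay between attempts. A standalone sketch of that bounded-retry check (read_dev_id() stands in for rio_mport_read_config_32()):

/* Standalone sketch of the bounded-retry access check; read_dev_id()
 * stands in for a maintenance read that may time out, and MAX_CHK_RETRY
 * mirrors RIO_MAX_CHK_RETRY. */
#include <stdio.h>

#define MAX_CHK_RETRY 3

static int attempts;

/* Pretend the device only answers on the third read. */
static int read_dev_id(unsigned int *val)
{
	if (attempts++ < 2)
		return -1;		/* read timed out */
	*val = 0x0374000a;		/* placeholder Device ID CAR value */
	return 0;
}

static int chk_dev_access(void)
{
	unsigned int id;
	int i = 0;

	while (read_dev_id(&id)) {
		if (++i == MAX_CHK_RETRY)
			return -1;	/* device, or the route to it, is gone */
		/* the kernel code sleeps here: mdelay(1) */
	}
	return 0;
}

int main(void)
{
	printf("access check: %s\n", chk_dev_access() ? "failed" : "ok");
	return 0;
}
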
index f27b7a9..b1af414 100644 (file)
@@ -14,6 +14,8 @@
 #include <linux/list.h>
 #include <linux/rio.h>
 
+#define RIO_MAX_CHK_RETRY      3
+
 /* Functions internal to the RIO core code */
 
 extern u32 rio_mport_get_feature(struct rio_mport *mport, int local, u16 destid,
@@ -22,6 +24,8 @@ extern u32 rio_mport_get_physefb(struct rio_mport *port, int local,
                                 u16 destid, u8 hopcount);
 extern u32 rio_mport_get_efb(struct rio_mport *port, int local, u16 destid,
                             u8 hopcount, u32 from);
+extern int rio_mport_chk_dev_access(struct rio_mport *mport, u16 destid,
+                                   u8 hopcount);
 extern int rio_create_sysfs_dev_files(struct rio_dev *rdev);
 extern int rio_enum_mport(struct rio_mport *mport);
 extern int rio_disc_mport(struct rio_mport *mport);
@@ -34,6 +38,7 @@ extern int rio_std_route_get_entry(struct rio_mport *mport, u16 destid,
 extern int rio_std_route_clr_table(struct rio_mport *mport, u16 destid,
                                   u8 hopcount, u16 table);
 extern int rio_set_port_lockout(struct rio_dev *rdev, u32 pnum, int lock);
+extern struct rio_dev *rio_get_comptag(u32 comp_tag, struct rio_dev *from);
 
 /* Structures internal to the RIO core code */
 extern struct device_attribute rio_dev_attrs[];
index 2b4e9b2..f47fee5 100644 (file)
@@ -20,6 +20,13 @@ config RAPIDIO_TSI568
        ---help---
          Includes support for IDT Tsi568 serial RapidIO switch.
 
+config RAPIDIO_CPS_GEN2
+       bool "IDT CPS Gen.2 SRIO switch support"
+       depends on RAPIDIO
+       default n
+       ---help---
+         Includes support for IDT CPS Gen.2 serial RapidIO switches.
+
 config RAPIDIO_TSI500
        bool "Tsi500 Parallel RapidIO switch support"
        depends on RAPIDIO
index fe4adc3..48d67a6 100644 (file)
@@ -6,6 +6,7 @@ obj-$(CONFIG_RAPIDIO_TSI57X)    += tsi57x.o
 obj-$(CONFIG_RAPIDIO_CPS_XX)   += idtcps.o
 obj-$(CONFIG_RAPIDIO_TSI568)   += tsi568.o
 obj-$(CONFIG_RAPIDIO_TSI500)   += tsi500.o
+obj-$(CONFIG_RAPIDIO_CPS_GEN2) += idt_gen2.o
 
 ifeq ($(CONFIG_RAPIDIO_DEBUG),y)
 EXTRA_CFLAGS += -DDEBUG
diff --git a/drivers/rapidio/switches/idt_gen2.c b/drivers/rapidio/switches/idt_gen2.c
new file mode 100644 (file)
index 0000000..0bb871c
--- /dev/null
@@ -0,0 +1,447 @@
+/*
+ * IDT CPS Gen.2 Serial RapidIO switch family support
+ *
+ * Copyright 2010 Integrated Device Technology, Inc.
+ * Alexandre Bounine <alexandre.bounine@idt.com>
+ *
+ * This program is free software; you can redistribute  it and/or modify it
+ * under  the terms of  the GNU General  Public License as published by the
+ * Free Software Foundation;  either version 2 of the  License, or (at your
+ * option) any later version.
+ */
+
+#include <linux/rio.h>
+#include <linux/rio_drv.h>
+#include <linux/rio_ids.h>
+#include <linux/delay.h>
+#include "../rio.h"
+
+#define LOCAL_RTE_CONF_DESTID_SEL      0x010070
+#define LOCAL_RTE_CONF_DESTID_SEL_PSEL 0x0000001f
+
+#define IDT_LT_ERR_REPORT_EN   0x03100c
+
+#define IDT_PORT_ERR_REPORT_EN(n)      (0x031044 + (n)*0x40)
+#define IDT_PORT_ERR_REPORT_EN_BC      0x03ff04
+
+#define IDT_PORT_ISERR_REPORT_EN(n)    (0x03104C + (n)*0x40)
+#define IDT_PORT_ISERR_REPORT_EN_BC    0x03ff0c
+#define IDT_PORT_INIT_TX_ACQUIRED      0x00000020
+
+#define IDT_LANE_ERR_REPORT_EN(n)      (0x038010 + (n)*0x100)
+#define IDT_LANE_ERR_REPORT_EN_BC      0x03ff10
+
+#define IDT_DEV_CTRL_1         0xf2000c
+#define IDT_DEV_CTRL_1_GENPW           0x02000000
+#define IDT_DEV_CTRL_1_PRSTBEH         0x00000001
+
+#define IDT_CFGBLK_ERR_CAPTURE_EN      0x020008
+#define IDT_CFGBLK_ERR_REPORT          0xf20014
+#define IDT_CFGBLK_ERR_REPORT_GENPW            0x00000002
+
+#define IDT_AUX_PORT_ERR_CAP_EN        0x020000
+#define IDT_AUX_ERR_REPORT_EN  0xf20018
+#define IDT_AUX_PORT_ERR_LOG_I2C       0x00000002
+#define IDT_AUX_PORT_ERR_LOG_JTAG      0x00000001
+
+#define        IDT_ISLTL_ADDRESS_CAP   0x021014
+
+#define IDT_RIO_DOMAIN         0xf20020
+#define IDT_RIO_DOMAIN_MASK            0x000000ff
+
+#define IDT_PW_INFO_CSR                0xf20024
+
+#define IDT_SOFT_RESET         0xf20040
+#define IDT_SOFT_RESET_REQ             0x00030097
+
+#define IDT_I2C_MCTRL          0xf20050
+#define IDT_I2C_MCTRL_GENPW            0x04000000
+
+#define IDT_JTAG_CTRL          0xf2005c
+#define IDT_JTAG_CTRL_GENPW            0x00000002
+
+#define IDT_LANE_CTRL(n)       (0xff8000 + (n)*0x100)
+#define IDT_LANE_CTRL_BC       0xffff00
+#define IDT_LANE_CTRL_GENPW            0x00200000
+#define IDT_LANE_DFE_1_BC      0xffff18
+#define IDT_LANE_DFE_2_BC      0xffff1c
+
+#define IDT_PORT_OPS(n)                (0xf40004 + (n)*0x100)
+#define IDT_PORT_OPS_GENPW             0x08000000
+#define IDT_PORT_OPS_PL_ELOG           0x00000040
+#define IDT_PORT_OPS_LL_ELOG           0x00000020
+#define IDT_PORT_OPS_LT_ELOG           0x00000010
+#define IDT_PORT_OPS_BC                0xf4ff04
+
+#define IDT_PORT_ISERR_DET(n)  (0xf40008 + (n)*0x100)
+
+#define IDT_ERR_CAP            0xfd0000
+#define IDT_ERR_CAP_LOG_OVERWR         0x00000004
+
+#define IDT_ERR_RD             0xfd0004
+
+#define IDT_DEFAULT_ROUTE      0xde
+#define IDT_NO_ROUTE           0xdf
+
+static int
+idtg2_route_add_entry(struct rio_mport *mport, u16 destid, u8 hopcount,
+                      u16 table, u16 route_destid, u8 route_port)
+{
+       /*
+        * Select routing table to update
+        */
+       if (table == RIO_GLOBAL_TABLE)
+               table = 0;
+       else
+               table++;
+
+       rio_mport_write_config_32(mport, destid, hopcount,
+                                 LOCAL_RTE_CONF_DESTID_SEL, table);
+
+       /*
+        * Program destination port for the specified destID
+        */
+       rio_mport_write_config_32(mport, destid, hopcount,
+                                 RIO_STD_RTE_CONF_DESTID_SEL_CSR,
+                                 (u32)route_destid);
+
+       rio_mport_write_config_32(mport, destid, hopcount,
+                                 RIO_STD_RTE_CONF_PORT_SEL_CSR,
+                                 (u32)route_port);
+       udelay(10);
+
+       return 0;
+}
+
+static int
+idtg2_route_get_entry(struct rio_mport *mport, u16 destid, u8 hopcount,
+                      u16 table, u16 route_destid, u8 *route_port)
+{
+       u32 result;
+
+       /*
+        * Select routing table to read
+        */
+       if (table == RIO_GLOBAL_TABLE)
+               table = 0;
+       else
+               table++;
+
+       rio_mport_write_config_32(mport, destid, hopcount,
+                                 LOCAL_RTE_CONF_DESTID_SEL, table);
+
+       rio_mport_write_config_32(mport, destid, hopcount,
+                                 RIO_STD_RTE_CONF_DESTID_SEL_CSR,
+                                 route_destid);
+
+       rio_mport_read_config_32(mport, destid, hopcount,
+                                RIO_STD_RTE_CONF_PORT_SEL_CSR, &result);
+
+       if (IDT_DEFAULT_ROUTE == (u8)result || IDT_NO_ROUTE == (u8)result)
+               *route_port = RIO_INVALID_ROUTE;
+       else
+               *route_port = (u8)result;
+
+       return 0;
+}
+
+static int
+idtg2_route_clr_table(struct rio_mport *mport, u16 destid, u8 hopcount,
+                      u16 table)
+{
+       u32 i;
+
+       /*
+        * Select routing table to read
+        */
+       if (table == RIO_GLOBAL_TABLE)
+               table = 0;
+       else
+               table++;
+
+       rio_mport_write_config_32(mport, destid, hopcount,
+                                 LOCAL_RTE_CONF_DESTID_SEL, table);
+
+       for (i = RIO_STD_RTE_CONF_EXTCFGEN;
+            i <= (RIO_STD_RTE_CONF_EXTCFGEN | 0xff);) {
+               rio_mport_write_config_32(mport, destid, hopcount,
+                       RIO_STD_RTE_CONF_DESTID_SEL_CSR, i);
+               rio_mport_write_config_32(mport, destid, hopcount,
+                       RIO_STD_RTE_CONF_PORT_SEL_CSR,
+                       (IDT_DEFAULT_ROUTE << 24) | (IDT_DEFAULT_ROUTE << 16) |
+                       (IDT_DEFAULT_ROUTE << 8) | IDT_DEFAULT_ROUTE);
+               i += 4;
+       }
+
+       return 0;
+}
+
+
+static int
+idtg2_set_domain(struct rio_mport *mport, u16 destid, u8 hopcount,
+                      u8 sw_domain)
+{
+       /*
+        * Switch domain configuration operates only at global level
+        */
+       rio_mport_write_config_32(mport, destid, hopcount,
+                                 IDT_RIO_DOMAIN, (u32)sw_domain);
+       return 0;
+}
+
+static int
+idtg2_get_domain(struct rio_mport *mport, u16 destid, u8 hopcount,
+                      u8 *sw_domain)
+{
+       u32 regval;
+
+       /*
+        * Switch domain configuration operates only at global level
+        */
+       rio_mport_read_config_32(mport, destid, hopcount,
+                               IDT_RIO_DOMAIN, &regval);
+
+       *sw_domain = (u8)(regval & 0xff);
+
+       return 0;
+}
+
+static int
+idtg2_em_init(struct rio_dev *rdev)
+{
+       struct rio_mport *mport = rdev->net->hport;
+       u16 destid = rdev->rswitch->destid;
+       u8 hopcount = rdev->rswitch->hopcount;
+       u32 regval;
+       int i, tmp;
+
+       /*
+        * This routine performs device-specific initialization only.
+        * All standard EM configuration should be performed at upper level.
+        */
+
+       pr_debug("RIO: %s [%d:%d]\n", __func__, destid, hopcount);
+
+       /* Set Port-Write info CSR: PRIO=3 and CRF=1 */
+       rio_mport_write_config_32(mport, destid, hopcount,
+                       IDT_PW_INFO_CSR, 0x0000e000);
+
+       /*
+        * Configure LT LAYER error reporting.
+        */
+
+       /* Enable standard (RIO.p8) error reporting */
+       rio_mport_write_config_32(mport, destid, hopcount,
+                       IDT_LT_ERR_REPORT_EN,
+                       REM_LTL_ERR_ILLTRAN | REM_LTL_ERR_UNSOLR |
+                       REM_LTL_ERR_UNSUPTR);
+
+       /* Use Port-Writes for LT layer error reporting.
+        * Enable per-port reset
+        */
+       rio_mport_read_config_32(mport, destid, hopcount,
+                       IDT_DEV_CTRL_1, &regval);
+       rio_mport_write_config_32(mport, destid, hopcount,
+                       IDT_DEV_CTRL_1,
+                       regval | IDT_DEV_CTRL_1_GENPW | IDT_DEV_CTRL_1_PRSTBEH);
+
+       /*
+        * Configure PORT error reporting.
+        */
+
+       /* Report all RIO.p8 errors supported by device */
+       rio_mport_write_config_32(mport, destid, hopcount,
+                       IDT_PORT_ERR_REPORT_EN_BC, 0x807e8037);
+
+       /* Configure reporting of implementation specific errors/events */
+       rio_mport_write_config_32(mport, destid, hopcount,
+                       IDT_PORT_ISERR_REPORT_EN_BC, IDT_PORT_INIT_TX_ACQUIRED);
+
+       /* Use Port-Writes for port error reporting and enable error logging */
+       tmp = RIO_GET_TOTAL_PORTS(rdev->swpinfo);
+       for (i = 0; i < tmp; i++) {
+               rio_mport_read_config_32(mport, destid, hopcount,
+                               IDT_PORT_OPS(i), &regval);
+               rio_mport_write_config_32(mport, destid, hopcount,
+                               IDT_PORT_OPS(i), regval | IDT_PORT_OPS_GENPW |
+                               IDT_PORT_OPS_PL_ELOG |
+                               IDT_PORT_OPS_LL_ELOG |
+                               IDT_PORT_OPS_LT_ELOG);
+       }
+       /* Overwrite error log if full */
+       rio_mport_write_config_32(mport, destid, hopcount,
+                       IDT_ERR_CAP, IDT_ERR_CAP_LOG_OVERWR);
+
+       /*
+        * Configure LANE error reporting.
+        */
+
+       /* Disable lane error reporting */
+       rio_mport_write_config_32(mport, destid, hopcount,
+                       IDT_LANE_ERR_REPORT_EN_BC, 0);
+
+       /* Use Port-Writes for lane error reporting (when enabled)
+        * (do per-lane update because lanes may have different configuration)
+        */
+       tmp = (rdev->did == RIO_DID_IDTCPS1848) ? 48 : 16;
+       for (i = 0; i < tmp; i++) {
+               rio_mport_read_config_32(mport, destid, hopcount,
+                               IDT_LANE_CTRL(i), &regval);
+               rio_mport_write_config_32(mport, destid, hopcount,
+                               IDT_LANE_CTRL(i), regval | IDT_LANE_CTRL_GENPW);
+       }
+
+       /*
+        * Configure AUX error reporting.
+        */
+
+       /* Disable JTAG and I2C Error capture */
+       rio_mport_write_config_32(mport, destid, hopcount,
+                       IDT_AUX_PORT_ERR_CAP_EN, 0);
+
+       /* Disable JTAG and I2C Error reporting/logging */
+       rio_mport_write_config_32(mport, destid, hopcount,
+                       IDT_AUX_ERR_REPORT_EN, 0);
+
+       /* Disable Port-Write notification from JTAG */
+       rio_mport_write_config_32(mport, destid, hopcount,
+                       IDT_JTAG_CTRL, 0);
+
+       /* Disable Port-Write notification from I2C */
+       rio_mport_read_config_32(mport, destid, hopcount,
+                       IDT_I2C_MCTRL, &regval);
+       rio_mport_write_config_32(mport, destid, hopcount,
+                       IDT_I2C_MCTRL,
+                       regval & ~IDT_I2C_MCTRL_GENPW);
+
+       /*
+        * Configure CFG_BLK error reporting.
+        */
+
+       /* Disable Configuration Block error capture */
+       rio_mport_write_config_32(mport, destid, hopcount,
+                       IDT_CFGBLK_ERR_CAPTURE_EN, 0);
+
+       /* Disable Port-Writes for Configuration Block error reporting */
+       rio_mport_read_config_32(mport, destid, hopcount,
+                       IDT_CFGBLK_ERR_REPORT, &regval);
+       rio_mport_write_config_32(mport, destid, hopcount,
+                       IDT_CFGBLK_ERR_REPORT,
+                       regval & ~IDT_CFGBLK_ERR_REPORT_GENPW);
+
+       /* set TVAL = ~50us */
+       rio_mport_write_config_32(mport, destid, hopcount,
+               rdev->phys_efptr + RIO_PORT_LINKTO_CTL_CSR, 0x8e << 8);
+
+       return 0;
+}
+
+static int
+idtg2_em_handler(struct rio_dev *rdev, u8 portnum)
+{
+       struct rio_mport *mport = rdev->net->hport;
+       u16 destid = rdev->rswitch->destid;
+       u8 hopcount = rdev->rswitch->hopcount;
+       u32 regval, em_perrdet, em_ltlerrdet;
+
+       rio_mport_read_config_32(mport, destid, hopcount,
+               rdev->em_efptr + RIO_EM_LTL_ERR_DETECT, &em_ltlerrdet);
+       if (em_ltlerrdet) {
+               /* Service Logical/Transport Layer Error(s) */
+               if (em_ltlerrdet & REM_LTL_ERR_IMPSPEC) {
+                       /* Implementation specific error reported */
+                       rio_mport_read_config_32(mport, destid, hopcount,
+                                       IDT_ISLTL_ADDRESS_CAP, &regval);
+
+                       pr_debug("RIO: %s Implementation Specific LTL errors" \
+                                " 0x%x @(0x%x)\n",
+                                rio_name(rdev), em_ltlerrdet, regval);
+
+                       /* Clear implementation specific address capture CSR */
+                       rio_mport_write_config_32(mport, destid, hopcount,
+                                       IDT_ISLTL_ADDRESS_CAP, 0);
+
+               }
+       }
+
+       rio_mport_read_config_32(mport, destid, hopcount,
+               rdev->em_efptr + RIO_EM_PN_ERR_DETECT(portnum), &em_perrdet);
+       if (em_perrdet) {
+               /* Service Port-Level Error(s) */
+               if (em_perrdet & REM_PED_IMPL_SPEC) {
+                       /* Implementation Specific port error reported */
+
+                       /* Get IS errors reported */
+                       rio_mport_read_config_32(mport, destid, hopcount,
+                                       IDT_PORT_ISERR_DET(portnum), &regval);
+
+                       pr_debug("RIO: %s Implementation Specific Port" \
+                                " errors 0x%x\n", rio_name(rdev), regval);
+
+                       /* Clear all implementation specific events */
+                       rio_mport_write_config_32(mport, destid, hopcount,
+                                       IDT_PORT_ISERR_DET(portnum), 0);
+               }
+       }
+
+       return 0;
+}
+
+static ssize_t
+idtg2_show_errlog(struct device *dev, struct device_attribute *attr, char *buf)
+{
+       struct rio_dev *rdev = to_rio_dev(dev);
+       struct rio_mport *mport = rdev->net->hport;
+       u16 destid = rdev->rswitch->destid;
+       u8 hopcount = rdev->rswitch->hopcount;
+       ssize_t len = 0;
+       u32 regval;
+
+       while (!rio_mport_read_config_32(mport, destid, hopcount,
+                                        IDT_ERR_RD, &regval)) {
+               if (!regval)    /* 0 = end of log */
+                       break;
+               len += snprintf(buf + len, PAGE_SIZE - len,
+                                       "%08x\n", regval);
+               if (len >= (PAGE_SIZE - 10))
+                       break;
+       }
+
+       return len;
+}
+
+static DEVICE_ATTR(errlog, S_IRUGO, idtg2_show_errlog, NULL);
+
+static int idtg2_sysfs(struct rio_dev *rdev, int create)
+{
+       struct device *dev = &rdev->dev;
+       int err = 0;
+
+       if (create == RIO_SW_SYSFS_CREATE) {
+               /* Initialize sysfs entries */
+               err = device_create_file(dev, &dev_attr_errlog);
+               if (err)
+                       dev_err(dev, "Unable to create sysfs errlog file\n");
+       } else
+               device_remove_file(dev, &dev_attr_errlog);
+
+       return err;
+}
+
+static int idtg2_switch_init(struct rio_dev *rdev, int do_enum)
+{
+       pr_debug("RIO: %s for %s\n", __func__, rio_name(rdev));
+       rdev->rswitch->add_entry = idtg2_route_add_entry;
+       rdev->rswitch->get_entry = idtg2_route_get_entry;
+       rdev->rswitch->clr_table = idtg2_route_clr_table;
+       rdev->rswitch->set_domain = idtg2_set_domain;
+       rdev->rswitch->get_domain = idtg2_get_domain;
+       rdev->rswitch->em_init = idtg2_em_init;
+       rdev->rswitch->em_handle = idtg2_em_handler;
+       rdev->rswitch->sw_sysfs = idtg2_sysfs;
+
+       return 0;
+}
+
+DECLARE_RIO_SWITCH_INIT(RIO_VID_IDT, RIO_DID_IDTCPS1848, idtg2_switch_init);
+DECLARE_RIO_SWITCH_INIT(RIO_VID_IDT, RIO_DID_IDTCPS1616, idtg2_switch_init);
index 2c790c1..fc9f637 100644 (file)
@@ -117,6 +117,10 @@ idtcps_get_domain(struct rio_mport *mport, u16 destid, u8 hopcount,
 
 static int idtcps_switch_init(struct rio_dev *rdev, int do_enum)
 {
+       struct rio_mport *mport = rdev->net->hport;
+       u16 destid = rdev->rswitch->destid;
+       u8 hopcount = rdev->rswitch->hopcount;
+
        pr_debug("RIO: %s for %s\n", __func__, rio_name(rdev));
        rdev->rswitch->add_entry = idtcps_route_add_entry;
        rdev->rswitch->get_entry = idtcps_route_get_entry;
@@ -126,6 +130,12 @@ static int idtcps_switch_init(struct rio_dev *rdev, int do_enum)
        rdev->rswitch->em_init = NULL;
        rdev->rswitch->em_handle = NULL;
 
+       if (do_enum) {
+               /* set TVAL = ~50us */
+               rio_mport_write_config_32(mport, destid, hopcount,
+                       rdev->phys_efptr + RIO_PORT_LINKTO_CTL_CSR, 0x8e << 8);
+       }
+
        return 0;
 }
 
index f7fd789..b9a389b 100644 (file)
@@ -29,7 +29,7 @@
 #define SPP_ROUTE_CFG_DESTID(n)        (0x11070 + 0x100*n)
 #define SPP_ROUTE_CFG_PORT(n)  (0x11074 + 0x100*n)
 
-#define TSI568_SP_MODE_BC      0x10004
+#define TSI568_SP_MODE(n)      (0x11004 + 0x100*n)
 #define  TSI568_SP_MODE_PW_DIS 0x08000000
 
 static int
@@ -117,14 +117,19 @@ tsi568_em_init(struct rio_dev *rdev)
        u16 destid = rdev->rswitch->destid;
        u8 hopcount = rdev->rswitch->hopcount;
        u32 regval;
+       int portnum;
 
        pr_debug("TSI568 %s [%d:%d]\n", __func__, destid, hopcount);
 
        /* Make sure that Port-Writes are disabled (for all ports) */
-       rio_mport_read_config_32(mport, destid, hopcount,
-                       TSI568_SP_MODE_BC, &regval);
-       rio_mport_write_config_32(mport, destid, hopcount,
-                       TSI568_SP_MODE_BC, regval | TSI568_SP_MODE_PW_DIS);
+       for (portnum = 0;
+            portnum < RIO_GET_TOTAL_PORTS(rdev->swpinfo); portnum++) {
+               rio_mport_read_config_32(mport, destid, hopcount,
+                               TSI568_SP_MODE(portnum), &regval);
+               rio_mport_write_config_32(mport, destid, hopcount,
+                               TSI568_SP_MODE(portnum),
+                               regval | TSI568_SP_MODE_PW_DIS);
+       }
 
        return 0;
 }
index d34df72..2003fb6 100644 (file)
@@ -166,7 +166,8 @@ tsi57x_em_init(struct rio_dev *rdev)
 
        pr_debug("TSI578 %s [%d:%d]\n", __func__, destid, hopcount);
 
-       for (portnum = 0; portnum < 16; portnum++) {
+       for (portnum = 0;
+            portnum < RIO_GET_TOTAL_PORTS(rdev->swpinfo); portnum++) {
                /* Make sure that Port-Writes are enabled (for all ports) */
                rio_mport_read_config_32(mport, destid, hopcount,
                                TSI578_SP_MODE(portnum), &regval);
@@ -205,6 +206,10 @@ tsi57x_em_init(struct rio_dev *rdev)
                        portnum++;
        }
 
+       /* set TVAL = ~50us */
+       rio_mport_write_config_32(mport, destid, hopcount,
+               rdev->phys_efptr + RIO_PORT_LINKTO_CTL_CSR, 0x9a << 8);
+
        return 0;
 }
 
index 2785a0f..6a77437 100644 (file)
@@ -171,7 +171,8 @@ config RTC_DRV_DS3232
        depends on RTC_CLASS && I2C
        help
          If you say yes here you get support for Dallas Semiconductor
-         DS3232 real-time clock chips.
+         DS3232 real-time clock chips. If an interrupt is associated
+         with the device, the alarm functionality is supported.
 
          This driver can also be built as a module.  If so, the module
          will be called rtc-ds3232.
@@ -952,4 +953,13 @@ config RTC_DRV_JZ4740
          This driver can also be built as a module. If so, the module
          will be called rtc-jz4740.
 
+config RTC_DRV_LPC32XX
+       depends on ARCH_LPC32XX
+       tristate "NXP LPC32XX RTC"
+       help
+         This enables support for the NXP RTC in the LPC32XX.
+
+         This driver can also be built as a module. If so, the module
+         will be called rtc-lpc32xx.
+
 endif # RTC_CLASS
index 0f207b3..7a7cb32 100644 (file)
@@ -51,6 +51,7 @@ obj-$(CONFIG_RTC_DRV_IMXDI)   += rtc-imxdi.o
 obj-$(CONFIG_RTC_DRV_ISL1208)  += rtc-isl1208.o
 obj-$(CONFIG_RTC_DRV_ISL12022) += rtc-isl12022.o
 obj-$(CONFIG_RTC_DRV_JZ4740)   += rtc-jz4740.o
+obj-$(CONFIG_RTC_DRV_LPC32XX)  += rtc-lpc32xx.o
 obj-$(CONFIG_RTC_DRV_M41T80)   += rtc-m41t80.o
 obj-$(CONFIG_RTC_DRV_M41T94)   += rtc-m41t94.o
 obj-$(CONFIG_RTC_DRV_M48T35)   += rtc-m48t35.o
index 565562b..e6539cb 100644 (file)
@@ -158,8 +158,10 @@ struct rtc_device *rtc_device_register(const char *name, struct device *dev,
        rtc_dev_prepare(rtc);
 
        err = device_register(&rtc->dev);
-       if (err)
+       if (err) {
+               put_device(&rtc->dev);
                goto exit_kfree;
+       }
 
        rtc_dev_add_device(rtc);
        rtc_sysfs_add_device(rtc);
index d4fb82d..b4b6087 100644 (file)
@@ -2,7 +2,7 @@
  * Blackfin On-Chip Real Time Clock Driver
  *  Supports BF51x/BF52x/BF53[123]/BF53[467]/BF54x
  *
- * Copyright 2004-2009 Analog Devices Inc.
+ * Copyright 2004-2010 Analog Devices Inc.
  *
  * Enter bugs at http://blackfin.uclinux.org/
  *
@@ -183,29 +183,33 @@ static irqreturn_t bfin_rtc_interrupt(int irq, void *dev_id)
        struct bfin_rtc *rtc = dev_get_drvdata(dev);
        unsigned long events = 0;
        bool write_complete = false;
-       u16 rtc_istat, rtc_ictl;
+       u16 rtc_istat, rtc_istat_clear, rtc_ictl, bits;
 
        dev_dbg_stamp(dev);
 
        rtc_istat = bfin_read_RTC_ISTAT();
        rtc_ictl = bfin_read_RTC_ICTL();
+       rtc_istat_clear = 0;
 
-       if (rtc_istat & RTC_ISTAT_WRITE_COMPLETE) {
-               bfin_write_RTC_ISTAT(RTC_ISTAT_WRITE_COMPLETE);
+       bits = RTC_ISTAT_WRITE_COMPLETE;
+       if (rtc_istat & bits) {
+               rtc_istat_clear |= bits;
                write_complete = true;
                complete(&bfin_write_complete);
        }
 
-       if (rtc_ictl & (RTC_ISTAT_ALARM | RTC_ISTAT_ALARM_DAY)) {
-               if (rtc_istat & (RTC_ISTAT_ALARM | RTC_ISTAT_ALARM_DAY)) {
-                       bfin_write_RTC_ISTAT(RTC_ISTAT_ALARM | RTC_ISTAT_ALARM_DAY);
+       bits = (RTC_ISTAT_ALARM | RTC_ISTAT_ALARM_DAY);
+       if (rtc_ictl & bits) {
+               if (rtc_istat & bits) {
+                       rtc_istat_clear |= bits;
                        events |= RTC_AF | RTC_IRQF;
                }
        }
 
-       if (rtc_ictl & RTC_ISTAT_SEC) {
-               if (rtc_istat & RTC_ISTAT_SEC) {
-                       bfin_write_RTC_ISTAT(RTC_ISTAT_SEC);
+       bits = RTC_ISTAT_SEC;
+       if (rtc_ictl & bits) {
+               if (rtc_istat & bits) {
+                       rtc_istat_clear |= bits;
                        events |= RTC_UF | RTC_IRQF;
                }
        }
@@ -213,9 +217,10 @@ static irqreturn_t bfin_rtc_interrupt(int irq, void *dev_id)
        if (events)
                rtc_update_irq(rtc->rtc_dev, 1, events);
 
-       if (write_complete || events)
+       if (write_complete || events) {
+               bfin_write_RTC_ISTAT(rtc_istat_clear);
                return IRQ_HANDLED;
-       else
+       } else
                return IRQ_NONE;
 }
 
@@ -422,9 +427,13 @@ static int __devexit bfin_rtc_remove(struct platform_device *pdev)
 #ifdef CONFIG_PM
 static int bfin_rtc_suspend(struct platform_device *pdev, pm_message_t state)
 {
-       if (device_may_wakeup(&pdev->dev)) {
+       struct device *dev = &pdev->dev;
+
+       dev_dbg_stamp(dev);
+
+       if (device_may_wakeup(dev)) {
                enable_irq_wake(IRQ_RTC);
-               bfin_rtc_sync_pending(&pdev->dev);
+               bfin_rtc_sync_pending(dev);
        } else
                bfin_rtc_int_clear(0);
 
@@ -433,7 +442,11 @@ static int bfin_rtc_suspend(struct platform_device *pdev, pm_message_t state)
 
 static int bfin_rtc_resume(struct platform_device *pdev)
 {
-       if (device_may_wakeup(&pdev->dev))
+       struct device *dev = &pdev->dev;
+
+       dev_dbg_stamp(dev);
+
+       if (device_may_wakeup(dev))
                disable_irq_wake(IRQ_RTC);
 
        /*
index 9de8516..5706355 100644 (file)
@@ -2,6 +2,7 @@
  * RTC client/driver for the Maxim/Dallas DS3232 Real-Time Clock over I2C
  *
  * Copyright (C) 2009-2010 Freescale Semiconductor.
+ * Author: Jack Lan <jack.lan@freescale.com>
  *
  * This program is free software; you can redistribute  it and/or modify it
  * under  the terms of  the GNU General  Public License as published by the
@@ -175,6 +176,182 @@ static int ds3232_set_time(struct device *dev, struct rtc_time *time)
                                              DS3232_REG_SECONDS, 7, buf);
 }
 
+/*
+ * The DS3232 has two alarms, but we only use alarm 1.
+ * Per the Linux RTC framework, only one-shot alarms are supported;
+ * there is no periodic alarm mode.
+ */
+static int ds3232_read_alarm(struct device *dev, struct rtc_wkalrm *alarm)
+{
+       struct i2c_client *client = to_i2c_client(dev);
+       struct ds3232 *ds3232 = i2c_get_clientdata(client);
+       int control, stat;
+       int ret;
+       u8 buf[4];
+
+       mutex_lock(&ds3232->mutex);
+
+       ret = i2c_smbus_read_byte_data(client, DS3232_REG_SR);
+       if (ret < 0)
+               goto out;
+       stat = ret;
+       ret = i2c_smbus_read_byte_data(client, DS3232_REG_CR);
+       if (ret < 0)
+               goto out;
+       control = ret;
+       ret = i2c_smbus_read_i2c_block_data(client, DS3232_REG_ALARM1, 4, buf);
+       if (ret < 0)
+               goto out;
+
+       alarm->time.tm_sec = bcd2bin(buf[0] & 0x7F);
+       alarm->time.tm_min = bcd2bin(buf[1] & 0x7F);
+       alarm->time.tm_hour = bcd2bin(buf[2] & 0x7F);
+       alarm->time.tm_mday = bcd2bin(buf[3] & 0x7F);
+
+       alarm->time.tm_mon = -1;
+       alarm->time.tm_year = -1;
+       alarm->time.tm_wday = -1;
+       alarm->time.tm_yday = -1;
+       alarm->time.tm_isdst = -1;
+
+       alarm->enabled = !!(control & DS3232_REG_CR_A1IE);
+       alarm->pending = !!(stat & DS3232_REG_SR_A1F);
+
+       ret = 0;
+out:
+       mutex_unlock(&ds3232->mutex);
+       return ret;
+}
+
+/*
+ * The Linux RTC framework does not support weekday alarms,
+ * and only the 24-hour time mode is supported.
+ */
+static int ds3232_set_alarm(struct device *dev, struct rtc_wkalrm *alarm)
+{
+       struct i2c_client *client = to_i2c_client(dev);
+       struct ds3232 *ds3232 = i2c_get_clientdata(client);
+       int control, stat;
+       int ret;
+       u8 buf[4];
+
+       if (client->irq <= 0)
+               return -EINVAL;
+
+       mutex_lock(&ds3232->mutex);
+
+       buf[0] = bin2bcd(alarm->time.tm_sec);
+       buf[1] = bin2bcd(alarm->time.tm_min);
+       buf[2] = bin2bcd(alarm->time.tm_hour);
+       buf[3] = bin2bcd(alarm->time.tm_mday);
+
+       /* clear alarm interrupt enable bit */
+       ret = i2c_smbus_read_byte_data(client, DS3232_REG_CR);
+       if (ret < 0)
+               goto out;
+       control = ret;
+       control &= ~(DS3232_REG_CR_A1IE | DS3232_REG_CR_A2IE);
+       ret = i2c_smbus_write_byte_data(client, DS3232_REG_CR, control);
+       if (ret < 0)
+               goto out;
+
+       /* clear any pending alarm flag */
+       ret = i2c_smbus_read_byte_data(client, DS3232_REG_SR);
+       if (ret < 0)
+               goto out;
+       stat = ret;
+       stat &= ~(DS3232_REG_SR_A1F | DS3232_REG_SR_A2F);
+       ret = i2c_smbus_write_byte_data(client, DS3232_REG_SR, stat);
+       if (ret < 0)
+               goto out;
+
+       ret = i2c_smbus_write_i2c_block_data(client, DS3232_REG_ALARM1, 4, buf);
+
+       if (alarm->enabled) {
+               control |= DS3232_REG_CR_A1IE;
+               ret = i2c_smbus_write_byte_data(client, DS3232_REG_CR, control);
+       }
+out:
+       mutex_unlock(&ds3232->mutex);
+       return ret;
+}
+
+static void ds3232_update_alarm(struct i2c_client *client)
+{
+       struct ds3232 *ds3232 = i2c_get_clientdata(client);
+       int control;
+       int ret;
+       u8 buf[4];
+
+       mutex_lock(&ds3232->mutex);
+
+       ret = i2c_smbus_read_i2c_block_data(client, DS3232_REG_ALARM1, 4, buf);
+       if (ret < 0)
+               goto unlock;
+
+       buf[0] = bcd2bin(buf[0]) < 0 || (ds3232->rtc->irq_data & RTC_UF) ?
+                                                               0x80 : buf[0];
+       buf[1] = bcd2bin(buf[1]) < 0 || (ds3232->rtc->irq_data & RTC_UF) ?
+                                                               0x80 : buf[1];
+       buf[2] = bcd2bin(buf[2]) < 0 || (ds3232->rtc->irq_data & RTC_UF) ?
+                                                               0x80 : buf[2];
+       buf[3] = bcd2bin(buf[3]) < 0 || (ds3232->rtc->irq_data & RTC_UF) ?
+                                                               0x80 : buf[3];
+
+       ret = i2c_smbus_write_i2c_block_data(client, DS3232_REG_ALARM1, 4, buf);
+       if (ret < 0)
+               goto unlock;
+
+       control = i2c_smbus_read_byte_data(client, DS3232_REG_CR);
+       if (control < 0)
+               goto unlock;
+
+       if (ds3232->rtc->irq_data & (RTC_AF | RTC_UF))
+               /* enable alarm1 interrupt */
+               control |= DS3232_REG_CR_A1IE;
+       else
+               /* disable alarm1 interrupt */
+               control &= ~(DS3232_REG_CR_A1IE);
+       i2c_smbus_write_byte_data(client, DS3232_REG_CR, control);
+
+unlock:
+       mutex_unlock(&ds3232->mutex);
+}
+
+static int ds3232_alarm_irq_enable(struct device *dev, unsigned int enabled)
+{
+       struct i2c_client *client = to_i2c_client(dev);
+       struct ds3232 *ds3232 = i2c_get_clientdata(client);
+
+       if (client->irq <= 0)
+               return -EINVAL;
+
+       if (enabled)
+               ds3232->rtc->irq_data |= RTC_AF;
+       else
+               ds3232->rtc->irq_data &= ~RTC_AF;
+
+       ds3232_update_alarm(client);
+       return 0;
+}
+
+static int ds3232_update_irq_enable(struct device *dev, unsigned int enabled)
+{
+       struct i2c_client *client = to_i2c_client(dev);
+       struct ds3232 *ds3232 = i2c_get_clientdata(client);
+
+       if (client->irq <= 0)
+               return -EINVAL;
+
+       if (enabled)
+               ds3232->rtc->irq_data |= RTC_UF;
+       else
+               ds3232->rtc->irq_data &= ~RTC_UF;
+
+       ds3232_update_alarm(client);
+       return 0;
+}
+
 static irqreturn_t ds3232_irq(int irq, void *dev_id)
 {
        struct i2c_client *client = dev_id;
@@ -222,6 +399,10 @@ unlock:
 static const struct rtc_class_ops ds3232_rtc_ops = {
        .read_time = ds3232_read_time,
        .set_time = ds3232_set_time,
+       .read_alarm = ds3232_read_alarm,
+       .set_alarm = ds3232_set_alarm,
+       .alarm_irq_enable = ds3232_alarm_irq_enable,
+       .update_irq_enable = ds3232_update_irq_enable,
 };
 
 static int __devinit ds3232_probe(struct i2c_client *client,
index 2619d57..2e16f72 100644 (file)
@@ -1,5 +1,6 @@
 /*
  *  Copyright (C) 2009-2010, Lars-Peter Clausen <lars@metafoo.de>
+ *  Copyright (C) 2010, Paul Cercueil <paul@crapouillou.net>
  *      JZ4740 SoC RTC driver
  *
  *  This program is free software; you can redistribute it and/or modify it
@@ -161,7 +162,8 @@ static int jz4740_rtc_set_alarm(struct device *dev, struct rtc_wkalrm *alrm)
 
        ret = jz4740_rtc_reg_write(rtc, JZ_REG_RTC_SEC_ALARM, secs);
        if (!ret)
-               ret = jz4740_rtc_ctrl_set_bits(rtc, JZ_RTC_CTRL_AE, alrm->enabled);
+               ret = jz4740_rtc_ctrl_set_bits(rtc,
+                       JZ_RTC_CTRL_AE | JZ_RTC_CTRL_AF_IRQ, alrm->enabled);
 
        return ret;
 }
@@ -258,6 +260,8 @@ static int __devinit jz4740_rtc_probe(struct platform_device *pdev)
 
        platform_set_drvdata(pdev, rtc);
 
+       device_init_wakeup(&pdev->dev, 1);
+
        rtc->rtc = rtc_device_register(pdev->name, &pdev->dev, &jz4740_rtc_ops,
                                        THIS_MODULE);
        if (IS_ERR(rtc->rtc)) {
@@ -318,12 +322,43 @@ static int __devexit jz4740_rtc_remove(struct platform_device *pdev)
        return 0;
 }
 
+
+#ifdef CONFIG_PM
+static int jz4740_rtc_suspend(struct device *dev)
+{
+       struct jz4740_rtc *rtc = dev_get_drvdata(dev);
+
+       if (device_may_wakeup(dev))
+               enable_irq_wake(rtc->irq);
+       return 0;
+}
+
+static int jz4740_rtc_resume(struct device *dev)
+{
+       struct jz4740_rtc *rtc = dev_get_drvdata(dev);
+
+       if (device_may_wakeup(dev))
+               disable_irq_wake(rtc->irq);
+       return 0;
+}
+
+static const struct dev_pm_ops jz4740_pm_ops = {
+       .suspend = jz4740_rtc_suspend,
+       .resume  = jz4740_rtc_resume,
+};
+#define JZ4740_RTC_PM_OPS (&jz4740_pm_ops)
+
+#else
+#define JZ4740_RTC_PM_OPS NULL
+#endif  /* CONFIG_PM */
+
 struct platform_driver jz4740_rtc_driver = {
-       .probe = jz4740_rtc_probe,
-       .remove = __devexit_p(jz4740_rtc_remove),
-       .driver = {
-               .name = "jz4740-rtc",
+       .probe   = jz4740_rtc_probe,
+       .remove  = __devexit_p(jz4740_rtc_remove),
+       .driver  = {
+               .name  = "jz4740-rtc",
                .owner = THIS_MODULE,
+               .pm    = JZ4740_RTC_PM_OPS,
        },
 };
 
diff --git a/drivers/rtc/rtc-lpc32xx.c b/drivers/rtc/rtc-lpc32xx.c
new file mode 100644 (file)
index 0000000..ec8701c
--- /dev/null
@@ -0,0 +1,414 @@
+/*
+ * Copyright (C) 2010 NXP Semiconductors
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ *  You should have received a copy of the GNU General Public License along
+ *  with this program; if not, write to the Free Software Foundation, Inc.,
+ *  675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/platform_device.h>
+#include <linux/spinlock.h>
+#include <linux/rtc.h>
+#include <linux/slab.h>
+#include <linux/io.h>
+
+/*
+ * Clock and Power control register offsets
+ */
+#define LPC32XX_RTC_UCOUNT             0x00
+#define LPC32XX_RTC_DCOUNT             0x04
+#define LPC32XX_RTC_MATCH0             0x08
+#define LPC32XX_RTC_MATCH1             0x0C
+#define LPC32XX_RTC_CTRL               0x10
+#define LPC32XX_RTC_INTSTAT            0x14
+#define LPC32XX_RTC_KEY                        0x18
+#define LPC32XX_RTC_SRAM               0x80
+
+#define LPC32XX_RTC_CTRL_MATCH0                (1 << 0)
+#define LPC32XX_RTC_CTRL_MATCH1                (1 << 1)
+#define LPC32XX_RTC_CTRL_ONSW_MATCH0   (1 << 2)
+#define LPC32XX_RTC_CTRL_ONSW_MATCH1   (1 << 3)
+#define LPC32XX_RTC_CTRL_SW_RESET      (1 << 4)
+#define LPC32XX_RTC_CTRL_CNTR_DIS      (1 << 6)
+#define LPC32XX_RTC_CTRL_ONSW_FORCE_HI (1 << 7)
+
+#define LPC32XX_RTC_INTSTAT_MATCH0     (1 << 0)
+#define LPC32XX_RTC_INTSTAT_MATCH1     (1 << 1)
+#define LPC32XX_RTC_INTSTAT_ONSW       (1 << 2)
+
+#define LPC32XX_RTC_KEY_ONSW_LOADVAL   0xB5C13F27
+
+#define RTC_NAME "rtc-lpc32xx"
+
+#define rtc_readl(dev, reg) \
+       __raw_readl((dev)->rtc_base + (reg))
+#define rtc_writel(dev, reg, val) \
+       __raw_writel((val), (dev)->rtc_base + (reg))
+
+struct lpc32xx_rtc {
+       void __iomem *rtc_base;
+       int irq;
+       unsigned char alarm_enabled;
+       struct rtc_device *rtc;
+       spinlock_t lock;
+};
+
+static int lpc32xx_rtc_read_time(struct device *dev, struct rtc_time *time)
+{
+       unsigned long elapsed_sec;
+       struct lpc32xx_rtc *rtc = dev_get_drvdata(dev);
+
+       elapsed_sec = rtc_readl(rtc, LPC32XX_RTC_UCOUNT);
+       rtc_time_to_tm(elapsed_sec, time);
+
+       return rtc_valid_tm(time);
+}
+
+static int lpc32xx_rtc_set_mmss(struct device *dev, unsigned long secs)
+{
+       struct lpc32xx_rtc *rtc = dev_get_drvdata(dev);
+       u32 tmp;
+
+       spin_lock_irq(&rtc->lock);
+
+       /* RTC must be disabled during count update */
+       tmp = rtc_readl(rtc, LPC32XX_RTC_CTRL);
+       rtc_writel(rtc, LPC32XX_RTC_CTRL, tmp | LPC32XX_RTC_CTRL_CNTR_DIS);
+       rtc_writel(rtc, LPC32XX_RTC_UCOUNT, secs);
+       rtc_writel(rtc, LPC32XX_RTC_DCOUNT, 0xFFFFFFFF - secs);
+       rtc_writel(rtc, LPC32XX_RTC_CTRL, tmp &= ~LPC32XX_RTC_CTRL_CNTR_DIS);
+
+       spin_unlock_irq(&rtc->lock);
+
+       return 0;
+}
+
+static int lpc32xx_rtc_read_alarm(struct device *dev,
+       struct rtc_wkalrm *wkalrm)
+{
+       struct lpc32xx_rtc *rtc = dev_get_drvdata(dev);
+
+       rtc_time_to_tm(rtc_readl(rtc, LPC32XX_RTC_MATCH0), &wkalrm->time);
+       wkalrm->enabled = rtc->alarm_enabled;
+       wkalrm->pending = !!(rtc_readl(rtc, LPC32XX_RTC_INTSTAT) &
+               LPC32XX_RTC_INTSTAT_MATCH0);
+
+       return rtc_valid_tm(&wkalrm->time);
+}
+
+static int lpc32xx_rtc_set_alarm(struct device *dev,
+       struct rtc_wkalrm *wkalrm)
+{
+       struct lpc32xx_rtc *rtc = dev_get_drvdata(dev);
+       unsigned long alarmsecs;
+       u32 tmp;
+       int ret;
+
+       ret = rtc_tm_to_time(&wkalrm->time, &alarmsecs);
+       if (ret < 0) {
+               dev_warn(dev, "Failed to convert time: %d\n", ret);
+               return ret;
+       }
+
+       spin_lock_irq(&rtc->lock);
+
+       /* Disable alarm during update */
+       tmp = rtc_readl(rtc, LPC32XX_RTC_CTRL);
+       rtc_writel(rtc, LPC32XX_RTC_CTRL, tmp & ~LPC32XX_RTC_CTRL_MATCH0);
+
+       rtc_writel(rtc, LPC32XX_RTC_MATCH0, alarmsecs);
+
+       rtc->alarm_enabled = wkalrm->enabled;
+       if (wkalrm->enabled) {
+               rtc_writel(rtc, LPC32XX_RTC_INTSTAT,
+                          LPC32XX_RTC_INTSTAT_MATCH0);
+               rtc_writel(rtc, LPC32XX_RTC_CTRL, tmp |
+                          LPC32XX_RTC_CTRL_MATCH0);
+       }
+
+       spin_unlock_irq(&rtc->lock);
+
+       return 0;
+}
+
+static int lpc32xx_rtc_alarm_irq_enable(struct device *dev,
+       unsigned int enabled)
+{
+       struct lpc32xx_rtc *rtc = dev_get_drvdata(dev);
+       u32 tmp;
+
+       spin_lock_irq(&rtc->lock);
+       tmp = rtc_readl(rtc, LPC32XX_RTC_CTRL);
+
+       if (enabled) {
+               rtc->alarm_enabled = 1;
+               tmp |= LPC32XX_RTC_CTRL_MATCH0;
+       } else {
+               rtc->alarm_enabled = 0;
+               tmp &= ~LPC32XX_RTC_CTRL_MATCH0;
+       }
+
+       rtc_writel(rtc, LPC32XX_RTC_CTRL, tmp);
+       spin_unlock_irq(&rtc->lock);
+
+       return 0;
+}
+
+static irqreturn_t lpc32xx_rtc_alarm_interrupt(int irq, void *dev)
+{
+       struct lpc32xx_rtc *rtc = dev;
+
+       spin_lock(&rtc->lock);
+
+       /* Disable alarm interrupt */
+       rtc_writel(rtc, LPC32XX_RTC_CTRL,
+               rtc_readl(rtc, LPC32XX_RTC_CTRL) &
+                         ~LPC32XX_RTC_CTRL_MATCH0);
+       rtc->alarm_enabled = 0;
+
+       /*
+        * Write a large value to the match value so the RTC won't
+        * keep firing the match status
+        */
+       rtc_writel(rtc, LPC32XX_RTC_MATCH0, 0xFFFFFFFF);
+       rtc_writel(rtc, LPC32XX_RTC_INTSTAT, LPC32XX_RTC_INTSTAT_MATCH0);
+
+       spin_unlock(&rtc->lock);
+
+       rtc_update_irq(rtc->rtc, 1, RTC_IRQF | RTC_AF);
+
+       return IRQ_HANDLED;
+}
+
+static const struct rtc_class_ops lpc32xx_rtc_ops = {
+       .read_time              = lpc32xx_rtc_read_time,
+       .set_mmss               = lpc32xx_rtc_set_mmss,
+       .read_alarm             = lpc32xx_rtc_read_alarm,
+       .set_alarm              = lpc32xx_rtc_set_alarm,
+       .alarm_irq_enable       = lpc32xx_rtc_alarm_irq_enable,
+};
+
+static int __devinit lpc32xx_rtc_probe(struct platform_device *pdev)
+{
+       struct resource *res;
+       struct lpc32xx_rtc *rtc;
+       resource_size_t size;
+       int rtcirq;
+       u32 tmp;
+
+       res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+       if (!res) {
+               dev_err(&pdev->dev, "Can't get memory resource\n");
+               return -ENOENT;
+       }
+
+       rtcirq = platform_get_irq(pdev, 0);
+       if (rtcirq < 0 || rtcirq >= NR_IRQS) {
+               dev_warn(&pdev->dev, "Can't get interrupt resource\n");
+               rtcirq = -1;
+       }
+
+       rtc = devm_kzalloc(&pdev->dev, sizeof(*rtc), GFP_KERNEL);
+       if (unlikely(!rtc)) {
+               dev_err(&pdev->dev, "Can't allocate memory\n");
+               return -ENOMEM;
+       }
+       rtc->irq = rtcirq;
+
+       size = resource_size(res);
+
+       if (!devm_request_mem_region(&pdev->dev, res->start, size,
+                                    pdev->name)) {
+               dev_err(&pdev->dev, "RTC registers are not free\n");
+               return -EBUSY;
+       }
+
+       rtc->rtc_base = devm_ioremap(&pdev->dev, res->start, size);
+       if (!rtc->rtc_base) {
+               dev_err(&pdev->dev, "Can't map memory\n");
+               return -ENOMEM;
+       }
+
+       spin_lock_init(&rtc->lock);
+
+       /*
+        * The RTC is on a separate power domain and can keep its state
+        * across a chip power cycle. If the RTC has never been previously
+        * set up, then set it up now for the first time.
+        */
+       tmp = rtc_readl(rtc, LPC32XX_RTC_CTRL);
+       if (rtc_readl(rtc, LPC32XX_RTC_KEY) != LPC32XX_RTC_KEY_ONSW_LOADVAL) {
+               tmp &= ~(LPC32XX_RTC_CTRL_SW_RESET |
+                       LPC32XX_RTC_CTRL_CNTR_DIS |
+                       LPC32XX_RTC_CTRL_MATCH0 |
+                       LPC32XX_RTC_CTRL_MATCH1 |
+                       LPC32XX_RTC_CTRL_ONSW_MATCH0 |
+                       LPC32XX_RTC_CTRL_ONSW_MATCH1 |
+                       LPC32XX_RTC_CTRL_ONSW_FORCE_HI);
+               rtc_writel(rtc, LPC32XX_RTC_CTRL, tmp);
+
+               /* Clear latched interrupt states */
+               rtc_writel(rtc, LPC32XX_RTC_MATCH0, 0xFFFFFFFF);
+               rtc_writel(rtc, LPC32XX_RTC_INTSTAT,
+                          LPC32XX_RTC_INTSTAT_MATCH0 |
+                          LPC32XX_RTC_INTSTAT_MATCH1 |
+                          LPC32XX_RTC_INTSTAT_ONSW);
+
+               /* Write key value to RTC so it won't reload on reset */
+               rtc_writel(rtc, LPC32XX_RTC_KEY,
+                          LPC32XX_RTC_KEY_ONSW_LOADVAL);
+       } else {
+               rtc_writel(rtc, LPC32XX_RTC_CTRL,
+                          tmp & ~LPC32XX_RTC_CTRL_MATCH0);
+       }
+
+       platform_set_drvdata(pdev, rtc);
+
+       rtc->rtc = rtc_device_register(RTC_NAME, &pdev->dev, &lpc32xx_rtc_ops,
+               THIS_MODULE);
+       if (IS_ERR(rtc->rtc)) {
+               dev_err(&pdev->dev, "Can't get RTC\n");
+               platform_set_drvdata(pdev, NULL);
+               return PTR_ERR(rtc->rtc);
+       }
+
+       /*
+        * The IRQ is enabled after device registration in case an alarm
+        * IRQ is pending upon suspend exit.
+        */
+       if (rtc->irq >= 0) {
+               if (devm_request_irq(&pdev->dev, rtc->irq,
+                                    lpc32xx_rtc_alarm_interrupt,
+                                    IRQF_DISABLED, pdev->name, rtc) < 0) {
+                       dev_warn(&pdev->dev, "Can't request interrupt.\n");
+                       rtc->irq = -1;
+               } else {
+                       device_init_wakeup(&pdev->dev, 1);
+               }
+       }
+
+       return 0;
+}
+
+static int __devexit lpc32xx_rtc_remove(struct platform_device *pdev)
+{
+       struct lpc32xx_rtc *rtc = platform_get_drvdata(pdev);
+
+       if (rtc->irq >= 0)
+               device_init_wakeup(&pdev->dev, 0);
+
+       platform_set_drvdata(pdev, NULL);
+       rtc_device_unregister(rtc->rtc);
+
+       return 0;
+}
+
+#ifdef CONFIG_PM
+static int lpc32xx_rtc_suspend(struct device *dev)
+{
+       struct platform_device *pdev = to_platform_device(dev);
+       struct lpc32xx_rtc *rtc = platform_get_drvdata(pdev);
+
+       if (rtc->irq >= 0) {
+               if (device_may_wakeup(&pdev->dev))
+                       enable_irq_wake(rtc->irq);
+               else
+                       disable_irq_wake(rtc->irq);
+       }
+
+       return 0;
+}
+
+static int lpc32xx_rtc_resume(struct device *dev)
+{
+       struct platform_device *pdev = to_platform_device(dev);
+       struct lpc32xx_rtc *rtc = platform_get_drvdata(pdev);
+
+       if (rtc->irq >= 0 && device_may_wakeup(&pdev->dev))
+               disable_irq_wake(rtc->irq);
+
+       return 0;
+}
+
+/* Unconditionally disable the alarm */
+static int lpc32xx_rtc_freeze(struct device *dev)
+{
+       struct platform_device *pdev = to_platform_device(dev);
+       struct lpc32xx_rtc *rtc = platform_get_drvdata(pdev);
+
+       spin_lock_irq(&rtc->lock);
+
+       rtc_writel(rtc, LPC32XX_RTC_CTRL,
+               rtc_readl(rtc, LPC32XX_RTC_CTRL) &
+                         ~LPC32XX_RTC_CTRL_MATCH0);
+
+       spin_unlock_irq(&rtc->lock);
+
+       return 0;
+}
+
+static int lpc32xx_rtc_thaw(struct device *dev)
+{
+       struct platform_device *pdev = to_platform_device(dev);
+       struct lpc32xx_rtc *rtc = platform_get_drvdata(pdev);
+
+       if (rtc->alarm_enabled) {
+               spin_lock_irq(&rtc->lock);
+
+               rtc_writel(rtc, LPC32XX_RTC_CTRL,
+                          rtc_readl(rtc, LPC32XX_RTC_CTRL) |
+                          LPC32XX_RTC_CTRL_MATCH0);
+
+               spin_unlock_irq(&rtc->lock);
+       }
+
+       return 0;
+}
+
+static const struct dev_pm_ops lpc32xx_rtc_pm_ops = {
+       .suspend = lpc32xx_rtc_suspend,
+       .resume = lpc32xx_rtc_resume,
+       .freeze = lpc32xx_rtc_freeze,
+       .thaw = lpc32xx_rtc_thaw,
+       .restore = lpc32xx_rtc_resume
+};
+
+#define LPC32XX_RTC_PM_OPS (&lpc32xx_rtc_pm_ops)
+#else
+#define LPC32XX_RTC_PM_OPS NULL
+#endif
+
+static struct platform_driver lpc32xx_rtc_driver = {
+       .probe          = lpc32xx_rtc_probe,
+       .remove         = __devexit_p(lpc32xx_rtc_remove),
+       .driver = {
+               .name   = RTC_NAME,
+               .owner  = THIS_MODULE,
+               .pm     = LPC32XX_RTC_PM_OPS
+       },
+};
+
+static int __init lpc32xx_rtc_init(void)
+{
+       return platform_driver_register(&lpc32xx_rtc_driver);
+}
+module_init(lpc32xx_rtc_init);
+
+static void __exit lpc32xx_rtc_exit(void)
+{
+       platform_driver_unregister(&lpc32xx_rtc_driver);
+}
+module_exit(lpc32xx_rtc_exit);
+
+MODULE_AUTHOR("Kevin Wells <wellsk40@gmail.com>");
+MODULE_DESCRIPTION("RTC driver for the LPC32xx SoC");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("platform:rtc-lpc32xx");
index 64d9727..73377b0 100644 (file)
@@ -34,7 +34,8 @@
  * Board-specific wiring options include using split power mode with
  * RTC_OFF_NOFF used as the reset signal (so the RTC won't be reset),
  * and wiring RTC_WAKE_INT (so the RTC alarm can wake the system from
- * low power modes).  See the BOARD-SPECIFIC CUSTOMIZATION comment.
+ * low power modes) for OMAP1 boards (OMAP-L138 has this built into
+ * the SoC). See the BOARD-SPECIFIC CUSTOMIZATION comment.
  */
 
 #define OMAP_RTC_BASE                  0xfffb4800
@@ -401,16 +402,17 @@ static int __init omap_rtc_probe(struct platform_device *pdev)
 
        /* BOARD-SPECIFIC CUSTOMIZATION CAN GO HERE:
         *
-        *  - Boards wired so that RTC_WAKE_INT does something, and muxed
-        *    right (W13_1610_RTC_WAKE_INT is the default after chip reset),
-        *    should initialize the device wakeup flag appropriately.
+        *  - Device wake-up capability setting should come through chip
+        *    init logic. OMAP1 boards should initialize the "wakeup capable"
+        *    flag in the platform device if the board is wired right for
+        *    being woken up by RTC alarm. For OMAP-L138, this capability
+        *    is built into the SoC by the "Deep Sleep" capability.
         *
         *  - Boards wired so RTC_ON_nOFF is used as the reset signal,
         *    rather than nPWRON_RESET, should forcibly enable split
         *    power mode.  (Some chip errata report that RTC_CTRL_SPLIT
         *    is write-only, and always reads as zero...)
         */
-       device_init_wakeup(&pdev->dev, 0);
 
        if (new_ctrl & (u8) OMAP_RTC_CTRL_SPLIT)
                pr_info("%s: split power mode\n", pdev->name);
index f57a87f..cf953ec 100644 (file)
@@ -100,7 +100,7 @@ static int s3c_rtc_setpie(struct device *dev, int enabled)
        spin_lock_irq(&s3c_rtc_pie_lock);
 
        if (s3c_rtc_cpu_type == TYPE_S3C64XX) {
-               tmp = readb(s3c_rtc_base + S3C2410_RTCCON);
+               tmp = readw(s3c_rtc_base + S3C2410_RTCCON);
                tmp &= ~S3C64XX_RTCCON_TICEN;
 
                if (enabled)
@@ -171,8 +171,8 @@ static int s3c_rtc_gettime(struct device *dev, struct rtc_time *rtc_tm)
                goto retry_get_time;
        }
 
-       pr_debug("read time %02x.%02x.%02x %02x/%02x/%02x\n",
-                rtc_tm->tm_year, rtc_tm->tm_mon, rtc_tm->tm_mday,
+       pr_debug("read time %04d.%02d.%02d %02d:%02d:%02d\n",
+                1900 + rtc_tm->tm_year, rtc_tm->tm_mon, rtc_tm->tm_mday,
                 rtc_tm->tm_hour, rtc_tm->tm_min, rtc_tm->tm_sec);
 
        rtc_tm->tm_sec = bcd2bin(rtc_tm->tm_sec);
@@ -185,7 +185,7 @@ static int s3c_rtc_gettime(struct device *dev, struct rtc_time *rtc_tm)
        rtc_tm->tm_year += 100;
        rtc_tm->tm_mon -= 1;
 
-       return 0;
+       return rtc_valid_tm(rtc_tm);
 }
 
 static int s3c_rtc_settime(struct device *dev, struct rtc_time *tm)
@@ -193,8 +193,8 @@ static int s3c_rtc_settime(struct device *dev, struct rtc_time *tm)
        void __iomem *base = s3c_rtc_base;
        int year = tm->tm_year - 100;
 
-       pr_debug("set time %02d.%02d.%02d %02d/%02d/%02d\n",
-                tm->tm_year, tm->tm_mon, tm->tm_mday,
+       pr_debug("set time %04d.%02d.%02d %02d:%02d:%02d\n",
+                1900 + tm->tm_year, tm->tm_mon, tm->tm_mday,
                 tm->tm_hour, tm->tm_min, tm->tm_sec);
 
        /* we get around y2k by simply not supporting it */
@@ -231,9 +231,9 @@ static int s3c_rtc_getalarm(struct device *dev, struct rtc_wkalrm *alrm)
 
        alrm->enabled = (alm_en & S3C2410_RTCALM_ALMEN) ? 1 : 0;
 
-       pr_debug("read alarm %02x %02x.%02x.%02x %02x/%02x/%02x\n",
+       pr_debug("read alarm %d, %04d.%02d.%02d %02d:%02d:%02d\n",
                 alm_en,
-                alm_tm->tm_year, alm_tm->tm_mon, alm_tm->tm_mday,
+                1900 + alm_tm->tm_year, alm_tm->tm_mon, alm_tm->tm_mday,
                 alm_tm->tm_hour, alm_tm->tm_min, alm_tm->tm_sec);
 
 
@@ -242,34 +242,34 @@ static int s3c_rtc_getalarm(struct device *dev, struct rtc_wkalrm *alrm)
        if (alm_en & S3C2410_RTCALM_SECEN)
                alm_tm->tm_sec = bcd2bin(alm_tm->tm_sec);
        else
-               alm_tm->tm_sec = 0xff;
+               alm_tm->tm_sec = -1;
 
        if (alm_en & S3C2410_RTCALM_MINEN)
                alm_tm->tm_min = bcd2bin(alm_tm->tm_min);
        else
-               alm_tm->tm_min = 0xff;
+               alm_tm->tm_min = -1;
 
        if (alm_en & S3C2410_RTCALM_HOUREN)
                alm_tm->tm_hour = bcd2bin(alm_tm->tm_hour);
        else
-               alm_tm->tm_hour = 0xff;
+               alm_tm->tm_hour = -1;
 
        if (alm_en & S3C2410_RTCALM_DAYEN)
                alm_tm->tm_mday = bcd2bin(alm_tm->tm_mday);
        else
-               alm_tm->tm_mday = 0xff;
+               alm_tm->tm_mday = -1;
 
        if (alm_en & S3C2410_RTCALM_MONEN) {
                alm_tm->tm_mon = bcd2bin(alm_tm->tm_mon);
                alm_tm->tm_mon -= 1;
        } else {
-               alm_tm->tm_mon = 0xff;
+               alm_tm->tm_mon = -1;
        }
 
        if (alm_en & S3C2410_RTCALM_YEAREN)
                alm_tm->tm_year = bcd2bin(alm_tm->tm_year);
        else
-               alm_tm->tm_year = 0xffff;
+               alm_tm->tm_year = -1;
 
        return 0;
 }
@@ -280,10 +280,10 @@ static int s3c_rtc_setalarm(struct device *dev, struct rtc_wkalrm *alrm)
        void __iomem *base = s3c_rtc_base;
        unsigned int alrm_en;
 
-       pr_debug("s3c_rtc_setalarm: %d, %02x/%02x/%02x %02x.%02x.%02x\n",
+       pr_debug("s3c_rtc_setalarm: %d, %04d.%02d.%02d %02d:%02d:%02d\n",
                 alrm->enabled,
-                tm->tm_mday & 0xff, tm->tm_mon & 0xff, tm->tm_year & 0xff,
-                tm->tm_hour & 0xff, tm->tm_min & 0xff, tm->tm_sec);
+                1900 + tm->tm_year, tm->tm_mon, tm->tm_mday,
+                tm->tm_hour, tm->tm_min, tm->tm_sec);
 
 
        alrm_en = readb(base + S3C2410_RTCALM) & S3C2410_RTCALM_ALMEN;
@@ -318,7 +318,7 @@ static int s3c_rtc_proc(struct device *dev, struct seq_file *seq)
        unsigned int ticnt;
 
        if (s3c_rtc_cpu_type == TYPE_S3C64XX) {
-               ticnt = readb(s3c_rtc_base + S3C2410_RTCCON);
+               ticnt = readw(s3c_rtc_base + S3C2410_RTCCON);
                ticnt &= S3C64XX_RTCCON_TICEN;
        } else {
                ticnt = readb(s3c_rtc_base + S3C2410_TICNT);
@@ -379,7 +379,8 @@ static const struct rtc_class_ops s3c_rtcops = {
        .set_alarm      = s3c_rtc_setalarm,
        .irq_set_freq   = s3c_rtc_setfreq,
        .irq_set_state  = s3c_rtc_setpie,
-       .proc           = s3c_rtc_proc,
+       .proc           = s3c_rtc_proc,
+       .alarm_irq_enable = s3c_rtc_setaie,
 };
 
 static void s3c_rtc_enable(struct platform_device *pdev, int en)
@@ -391,11 +392,11 @@ static void s3c_rtc_enable(struct platform_device *pdev, int en)
                return;
 
        if (!en) {
-               tmp = readb(base + S3C2410_RTCCON);
+               tmp = readw(base + S3C2410_RTCCON);
                if (s3c_rtc_cpu_type == TYPE_S3C64XX)
                        tmp &= ~S3C64XX_RTCCON_TICEN;
                tmp &= ~S3C2410_RTCCON_RTCEN;
-               writeb(tmp, base + S3C2410_RTCCON);
+               writew(tmp, base + S3C2410_RTCCON);
 
                if (s3c_rtc_cpu_type == TYPE_S3C2410) {
                        tmp = readb(base + S3C2410_TICNT);
@@ -405,25 +406,28 @@ static void s3c_rtc_enable(struct platform_device *pdev, int en)
        } else {
                /* re-enable the device, and check it is ok */
 
-               if ((readb(base+S3C2410_RTCCON) & S3C2410_RTCCON_RTCEN) == 0){
+               if ((readw(base+S3C2410_RTCCON) & S3C2410_RTCCON_RTCEN) == 0) {
                        dev_info(&pdev->dev, "rtc disabled, re-enabling\n");
 
-                       tmp = readb(base + S3C2410_RTCCON);
-                       writeb(tmp|S3C2410_RTCCON_RTCEN, base+S3C2410_RTCCON);
+                       tmp = readw(base + S3C2410_RTCCON);
+                       writew(tmp | S3C2410_RTCCON_RTCEN,
+                               base + S3C2410_RTCCON);
                }
 
-               if ((readb(base + S3C2410_RTCCON) & S3C2410_RTCCON_CNTSEL)){
+               if ((readw(base + S3C2410_RTCCON) & S3C2410_RTCCON_CNTSEL)) {
                        dev_info(&pdev->dev, "removing RTCCON_CNTSEL\n");
 
-                       tmp = readb(base + S3C2410_RTCCON);
-                       writeb(tmp& ~S3C2410_RTCCON_CNTSEL, base+S3C2410_RTCCON);
+                       tmp = readw(base + S3C2410_RTCCON);
+                       writew(tmp & ~S3C2410_RTCCON_CNTSEL,
+                               base + S3C2410_RTCCON);
                }
 
-               if ((readb(base + S3C2410_RTCCON) & S3C2410_RTCCON_CLKRST)){
+               if ((readw(base + S3C2410_RTCCON) & S3C2410_RTCCON_CLKRST)) {
                        dev_info(&pdev->dev, "removing RTCCON_CLKRST\n");
 
-                       tmp = readb(base + S3C2410_RTCCON);
-                       writeb(tmp & ~S3C2410_RTCCON_CLKRST, base+S3C2410_RTCCON);
+                       tmp = readw(base + S3C2410_RTCCON);
+                       writew(tmp & ~S3C2410_RTCCON_CLKRST,
+                               base + S3C2410_RTCCON);
                }
        }
 }
@@ -452,8 +456,8 @@ static int __devexit s3c_rtc_remove(struct platform_device *dev)
 static int __devinit s3c_rtc_probe(struct platform_device *pdev)
 {
        struct rtc_device *rtc;
+       struct rtc_time rtc_tm;
        struct resource *res;
-       unsigned int tmp, i;
        int ret;
 
        pr_debug("%s: probe=%p\n", __func__, pdev);
@@ -514,8 +518,8 @@ static int __devinit s3c_rtc_probe(struct platform_device *pdev)
 
        s3c_rtc_enable(pdev, 1);
 
-       pr_debug("s3c2410_rtc: RTCCON=%02x\n",
-                readb(s3c_rtc_base + S3C2410_RTCCON));
+       pr_debug("s3c2410_rtc: RTCCON=%02x\n",
+                readw(s3c_rtc_base + S3C2410_RTCCON));
 
        device_init_wakeup(&pdev->dev, 1);
 
@@ -534,11 +538,19 @@ static int __devinit s3c_rtc_probe(struct platform_device *pdev)
 
        /* Check RTC Time */
 
-       for (i = S3C2410_RTCSEC; i <= S3C2410_RTCYEAR; i += 0x4) {
-               tmp = readb(s3c_rtc_base + i);
+       s3c_rtc_gettime(NULL, &rtc_tm);
+
+       if (rtc_valid_tm(&rtc_tm)) {
+               rtc_tm.tm_year  = 100;
+               rtc_tm.tm_mon   = 0;
+               rtc_tm.tm_mday  = 1;
+               rtc_tm.tm_hour  = 0;
+               rtc_tm.tm_min   = 0;
+               rtc_tm.tm_sec   = 0;
+
+               s3c_rtc_settime(NULL, &rtc_tm);
 
-               if ((tmp & 0xf) > 0x9 || ((tmp >> 4) & 0xf) > 0x9)
-                       writeb(0, s3c_rtc_base + i);
+               dev_warn(&pdev->dev, "warning: invalid RTC value so initializing it\n");
        }
 
        if (s3c_rtc_cpu_type == TYPE_S3C64XX)
@@ -578,7 +590,7 @@ static int s3c_rtc_suspend(struct platform_device *pdev, pm_message_t state)
        /* save TICNT for anyone using periodic interrupts */
        ticnt_save = readb(s3c_rtc_base + S3C2410_TICNT);
        if (s3c_rtc_cpu_type == TYPE_S3C64XX) {
-               ticnt_en_save = readb(s3c_rtc_base + S3C2410_RTCCON);
+               ticnt_en_save = readw(s3c_rtc_base + S3C2410_RTCCON);
                ticnt_en_save &= S3C64XX_RTCCON_TICEN;
        }
        s3c_rtc_enable(pdev, 0);
@@ -596,8 +608,8 @@ static int s3c_rtc_resume(struct platform_device *pdev)
        s3c_rtc_enable(pdev, 1);
        writeb(ticnt_save, s3c_rtc_base + S3C2410_TICNT);
        if (s3c_rtc_cpu_type == TYPE_S3C64XX && ticnt_en_save) {
-               tmp = readb(s3c_rtc_base + S3C2410_RTCCON);
-               writeb(tmp | ticnt_en_save, s3c_rtc_base + S3C2410_RTCCON);
+               tmp = readw(s3c_rtc_base + S3C2410_RTCCON);
+               writew(tmp | ticnt_en_save, s3c_rtc_base + S3C2410_RTCCON);
        }
 
        if (device_may_wakeup(&pdev->dev))
index 42e303f..0e6aa3d 100644 (file)
@@ -697,9 +697,9 @@ fb_read(struct file *file, char __user *buf, size_t count, loff_t *ppos)
        struct inode *inode = file->f_path.dentry->d_inode;
        int fbidx = iminor(inode);
        struct fb_info *info = registered_fb[fbidx];
-       u32 *buffer, *dst;
-       u32 __iomem *src;
-       int c, i, cnt = 0, err = 0;
+       u8 *buffer, *dst;
+       u8 __iomem *src;
+       int c, cnt = 0, err = 0;
        unsigned long total_size;
 
        if (!info || ! info->screen_base)
@@ -730,7 +730,7 @@ fb_read(struct file *file, char __user *buf, size_t count, loff_t *ppos)
        if (!buffer)
                return -ENOMEM;
 
-       src = (u32 __iomem *) (info->screen_base + p);
+       src = (u8 __iomem *) (info->screen_base + p);
 
        if (info->fbops->fb_sync)
                info->fbops->fb_sync(info);
@@ -738,17 +738,9 @@ fb_read(struct file *file, char __user *buf, size_t count, loff_t *ppos)
        while (count) {
                c  = (count > PAGE_SIZE) ? PAGE_SIZE : count;
                dst = buffer;
-               for (i = c >> 2; i--; )
-                       *dst++ = fb_readl(src++);
-               if (c & 3) {
-                       u8 *dst8 = (u8 *) dst;
-                       u8 __iomem *src8 = (u8 __iomem *) src;
-
-                       for (i = c & 3; i--;)
-                               *dst8++ = fb_readb(src8++);
-
-                       src = (u32 __iomem *) src8;
-               }
+               fb_memcpy_fromfb(dst, src, c);
+               dst += c;
+               src += c;
 
                if (copy_to_user(buf, buffer, c)) {
                        err = -EFAULT;
@@ -772,9 +764,9 @@ fb_write(struct file *file, const char __user *buf, size_t count, loff_t *ppos)
        struct inode *inode = file->f_path.dentry->d_inode;
        int fbidx = iminor(inode);
        struct fb_info *info = registered_fb[fbidx];
-       u32 *buffer, *src;
-       u32 __iomem *dst;
-       int c, i, cnt = 0, err = 0;
+       u8 *buffer, *src;
+       u8 __iomem *dst;
+       int c, cnt = 0, err = 0;
        unsigned long total_size;
 
        if (!info || !info->screen_base)
@@ -811,7 +803,7 @@ fb_write(struct file *file, const char __user *buf, size_t count, loff_t *ppos)
        if (!buffer)
                return -ENOMEM;
 
-       dst = (u32 __iomem *) (info->screen_base + p);
+       dst = (u8 __iomem *) (info->screen_base + p);
 
        if (info->fbops->fb_sync)
                info->fbops->fb_sync(info);
@@ -825,19 +817,9 @@ fb_write(struct file *file, const char __user *buf, size_t count, loff_t *ppos)
                        break;
                }
 
-               for (i = c >> 2; i--; )
-                       fb_writel(*src++, dst++);
-
-               if (c & 3) {
-                       u8 *src8 = (u8 *) src;
-                       u8 __iomem *dst8 = (u8 __iomem *) dst;
-
-                       for (i = c & 3; i--; )
-                               fb_writeb(*src8++, dst8++);
-
-                       dst = (u32 __iomem *) dst8;
-               }
-
+               fb_memcpy_tofb(dst, src, c);
+               dst += c;
+               src += c;
                *ppos += c;
                buf += c;
                cnt += c;
@@ -877,13 +859,13 @@ fb_pan_display(struct fb_info *info, struct fb_var_screeninfo *var)
 
        if ((err = info->fbops->fb_pan_display(var, info)))
                return err;
-        info->var.xoffset = var->xoffset;
-        info->var.yoffset = var->yoffset;
-        if (var->vmode & FB_VMODE_YWRAP)
-                info->var.vmode |= FB_VMODE_YWRAP;
-        else
-                info->var.vmode &= ~FB_VMODE_YWRAP;
-        return 0;
+       info->var.xoffset = var->xoffset;
+       info->var.yoffset = var->yoffset;
+       if (var->vmode & FB_VMODE_YWRAP)
+               info->var.vmode |= FB_VMODE_YWRAP;
+       else
+               info->var.vmode &= ~FB_VMODE_YWRAP;
+       return 0;
 }
 
 static int fb_check_caps(struct fb_info *info, struct fb_var_screeninfo *var,
index ca3355e..933899d 100644 (file)
@@ -1143,8 +1143,10 @@ static int __devinit gbefb_probe(struct platform_device *p_dev)
                return -ENOMEM;
 
 #ifndef MODULE
-       if (fb_get_options("gbefb", &options))
-               return -ENODEV;
+       if (fb_get_options("gbefb", &options)) {
+               ret = -ENODEV;
+               goto out_release_framebuffer;
+       }
        gbefb_setup(options);
 #endif
 
index f9fa0fd..1717623 100644 (file)
@@ -869,12 +869,9 @@ static int MGAG100_preinit(struct matrox_fb_info *minfo)
        minfo->capable.plnwt = minfo->devflags.accelerator == FB_ACCEL_MATROX_MGAG100
                        ? minfo->devflags.sgram : 1;
 
-#ifdef CONFIG_FB_MATROX_G
        if (minfo->devflags.g450dac) {
                minfo->outputs[0].output = &g450out;
-       } else
-#endif
-       {
+       } else {
                minfo->outputs[0].output = &m1064;
        }
        minfo->outputs[0].src = minfo->outputs[0].default_src;
index 1e3e8f1..31b8f67 100644 (file)
@@ -280,7 +280,7 @@ static int matroxfb_PLL_mavenclock(const struct matrox_pll_features2* pll,
        return fxtal * (*feed) / (*in) * ctl->den;
 }
 
-static unsigned int matroxfb_mavenclock(const struct matrox_pll_ctl* ctl,
+static int matroxfb_mavenclock(const struct matrox_pll_ctl *ctl,
                unsigned int htotal, unsigned int vtotal,
                unsigned int* in, unsigned int* feed, unsigned int* post,
                unsigned int* htotal2) {
index 2ffb34a..87785c2 100644 (file)
@@ -1590,7 +1590,7 @@ static int blizzard_init(struct omapfb_device *fbdev, int ext_mode,
        blizzard.auto_update_window.width = fbdev->panel->x_res;
        blizzard.auto_update_window.height = fbdev->panel->y_res;
        blizzard.auto_update_window.out_x = 0;
-       blizzard.auto_update_window.out_x = 0;
+       blizzard.auto_update_window.out_y = 0;
        blizzard.auto_update_window.out_width = fbdev->panel->x_res;
        blizzard.auto_update_window.out_height = fbdev->panel->y_res;
        blizzard.auto_update_window.format = 0;
index ed371c8..b16e613 100644 (file)
@@ -181,6 +181,15 @@ void savagefb_create_i2c_busses(struct fb_info *info)
                par->chan.algo.getscl = prosavage_gpio_getscl;
                break;
        case FB_ACCEL_SAVAGE4:
+               par->chan.reg = CR_SERIAL1;
+               if (par->pcidev->revision > 1 && !(VGArCR(0xa6, par) & 0x40))
+                       par->chan.reg = CR_SERIAL2;
+               par->chan.ioaddr      = par->mmio.vbase;
+               par->chan.algo.setsda = prosavage_gpio_setsda;
+               par->chan.algo.setscl = prosavage_gpio_setscl;
+               par->chan.algo.getsda = prosavage_gpio_getsda;
+               par->chan.algo.getscl = prosavage_gpio_getscl;
+               break;
        case FB_ACCEL_SAVAGE2000:
                par->chan.reg         = 0xff20;
                par->chan.ioaddr      = par->mmio.vbase;
index 2839e28..b7b5014 100644 (file)
@@ -517,10 +517,10 @@ static W1_MASTER_ATTR_RO(max_slave_count, S_IRUGO);
 static W1_MASTER_ATTR_RO(attempts, S_IRUGO);
 static W1_MASTER_ATTR_RO(timeout, S_IRUGO);
 static W1_MASTER_ATTR_RO(pointer, S_IRUGO);
-static W1_MASTER_ATTR_RW(search, S_IRUGO | S_IWUGO);
-static W1_MASTER_ATTR_RW(pullup, S_IRUGO | S_IWUGO);
-static W1_MASTER_ATTR_RW(add, S_IRUGO | S_IWUGO);
-static W1_MASTER_ATTR_RW(remove, S_IRUGO | S_IWUGO);
+static W1_MASTER_ATTR_RW(search, S_IRUGO | S_IWUSR | S_IWGRP);
+static W1_MASTER_ATTR_RW(pullup, S_IRUGO | S_IWUSR | S_IWGRP);
+static W1_MASTER_ATTR_RW(add, S_IRUGO | S_IWUSR | S_IWGRP);
+static W1_MASTER_ATTR_RW(remove, S_IRUGO | S_IWUSR | S_IWGRP);
 
 static struct attribute *w1_master_default_attrs[] = {
        &w1_master_attribute_name.attr,
index b5e582b..97673c9 100644 (file)
@@ -53,7 +53,6 @@ config EXPORTFS
 config FILE_LOCKING
        bool "Enable POSIX file locking API" if EMBEDDED
        default y
-       select BKL # while lockd still uses it.
        help
          This option enables standard file locking support, required
           for filesystems like NFS and for the flock() system
index bb4cc5b..79e2ca7 100644 (file)
@@ -42,7 +42,7 @@ config BINFMT_ELF_FDPIC
 
 config CORE_DUMP_DEFAULT_ELF_HEADERS
        bool "Write ELF core dumps with partial segments"
-       default n
+       default y
        depends on BINFMT_ELF && ELF_CORE
        help
          ELF core dump files describe each memory mapping of the crashed
@@ -60,7 +60,7 @@ config CORE_DUMP_DEFAULT_ELF_HEADERS
          inherited.  See Documentation/filesystems/proc.txt for details.
 
          This config option changes the default setting of coredump_filter
-         seen at boot time.  If unsure, say N.
+         seen at boot time.  If unsure, say Y.
 
 config BINFMT_FLAT
        bool "Kernel support for flat binaries"
index 256bb7b..8cf0724 100644 (file)
@@ -77,9 +77,6 @@
 /* Maximum number of nesting allowed inside epoll sets */
 #define EP_MAX_NESTS 4
 
-/* Maximum msec timeout value storeable in a long int */
-#define EP_MAX_MSTIMEO min(1000ULL * MAX_SCHEDULE_TIMEOUT / HZ, (LONG_MAX - 999ULL) / HZ)
-
 #define EP_MAX_EVENTS (INT_MAX / sizeof(struct epoll_event))
 
 #define EP_UNACTIVE_PTR ((void *) -1L)
@@ -1117,18 +1114,22 @@ static int ep_send_events(struct eventpoll *ep,
 static int ep_poll(struct eventpoll *ep, struct epoll_event __user *events,
                   int maxevents, long timeout)
 {
-       int res, eavail;
+       int res, eavail, timed_out = 0;
        unsigned long flags;
-       long jtimeout;
+       long slack;
        wait_queue_t wait;
-
-       /*
-        * Calculate the timeout by checking for the "infinite" value (-1)
-        * and the overflow condition. The passed timeout is in milliseconds,
-        * that why (t * HZ) / 1000.
-        */
-       jtimeout = (timeout < 0 || timeout >= EP_MAX_MSTIMEO) ?
-               MAX_SCHEDULE_TIMEOUT : (timeout * HZ + 999) / 1000;
+       struct timespec end_time;
+       ktime_t expires, *to = NULL;
+
+       if (timeout > 0) {
+               ktime_get_ts(&end_time);
+               timespec_add_ns(&end_time, (u64)timeout * NSEC_PER_MSEC);
+               slack = select_estimate_accuracy(&end_time);
+               to = &expires;
+               *to = timespec_to_ktime(end_time);
+       } else if (timeout == 0) {
+               timed_out = 1;
+       }
 
 retry:
        spin_lock_irqsave(&ep->lock, flags);
@@ -1150,7 +1151,7 @@ retry:
                         * to TASK_INTERRUPTIBLE before doing the checks.
                         */
                        set_current_state(TASK_INTERRUPTIBLE);
-                       if (!list_empty(&ep->rdllist) || !jtimeout)
+                       if (!list_empty(&ep->rdllist) || timed_out)
                                break;
                        if (signal_pending(current)) {
                                res = -EINTR;
@@ -1158,7 +1159,9 @@ retry:
                        }
 
                        spin_unlock_irqrestore(&ep->lock, flags);
-                       jtimeout = schedule_timeout(jtimeout);
+                       if (!schedule_hrtimeout_range(to, slack, HRTIMER_MODE_ABS))
+                               timed_out = 1;
+
                        spin_lock_irqsave(&ep->lock, flags);
                }
                __remove_wait_queue(&ep->wq, &wait);
@@ -1176,7 +1179,7 @@ retry:
         * more luck.
         */
        if (!res && eavail &&
-           !(res = ep_send_events(ep, events, maxevents)) && jtimeout)
+           !(res = ep_send_events(ep, events, maxevents)) && !timed_out)
                goto retry;
 
        return res;
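
The reworked ep_poll() turns the caller's millisecond timeout into an absolute high-resolution deadline and lets schedule_hrtimeout_range() handle it, so the old EP_MAX_MSTIMEO clamp and jiffies conversion go away. A rough userspace analogue of the millisecond-to-absolute-nanosecond conversion (clock choice and names are illustrative):

#include <stdint.h>
#include <stdio.h>
#include <time.h>

#define NSEC_PER_MSEC 1000000LL
#define NSEC_PER_SEC  1000000000LL

/* Turn a relative timeout in ms into an absolute deadline in ns,
 * mirroring the ktime_get_ts()/timespec_add_ns() steps in ep_poll(). */
static int64_t deadline_ns(long timeout_ms)
{
        struct timespec now;

        clock_gettime(CLOCK_MONOTONIC, &now);
        return (int64_t)now.tv_sec * NSEC_PER_SEC + now.tv_nsec +
               (int64_t)timeout_ms * NSEC_PER_MSEC;
}

int main(void)
{
        printf("deadline: %lld ns\n", (long long)deadline_ns(250));
        return 0;
}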
index 3aa75b8..99d33a1 100644 (file)
--- a/fs/exec.c
+++ b/fs/exec.c
@@ -66,6 +66,12 @@ char core_pattern[CORENAME_MAX_SIZE] = "core";
 unsigned int core_pipe_limit;
 int suid_dumpable = 0;
 
+struct core_name {
+       char *corename;
+       int used, size;
+};
+static atomic_t call_count = ATOMIC_INIT(1);
+
 /* The maximal length of core_pattern is also specified in sysctl.c */
 
 static LIST_HEAD(formats);
@@ -1003,7 +1009,7 @@ int flush_old_exec(struct linux_binprm * bprm)
 
        bprm->mm = NULL;                /* We're using it now */
 
-       current->flags &= ~PF_RANDOMIZE;
+       current->flags &= ~(PF_RANDOMIZE | PF_KTHREAD);
        flush_thread();
        current->personality &= ~bprm->per_clear;
 
@@ -1083,14 +1089,14 @@ EXPORT_SYMBOL(setup_new_exec);
  */
 int prepare_bprm_creds(struct linux_binprm *bprm)
 {
-       if (mutex_lock_interruptible(&current->cred_guard_mutex))
+       if (mutex_lock_interruptible(&current->signal->cred_guard_mutex))
                return -ERESTARTNOINTR;
 
        bprm->cred = prepare_exec_creds();
        if (likely(bprm->cred))
                return 0;
 
-       mutex_unlock(&current->cred_guard_mutex);
+       mutex_unlock(&current->signal->cred_guard_mutex);
        return -ENOMEM;
 }
 
@@ -1098,7 +1104,7 @@ void free_bprm(struct linux_binprm *bprm)
 {
        free_arg_pages(bprm);
        if (bprm->cred) {
-               mutex_unlock(&current->cred_guard_mutex);
+               mutex_unlock(&current->signal->cred_guard_mutex);
                abort_creds(bprm->cred);
        }
        kfree(bprm);
@@ -1119,13 +1125,13 @@ void install_exec_creds(struct linux_binprm *bprm)
         * credentials; any time after this it may be unlocked.
         */
        security_bprm_committed_creds(bprm);
-       mutex_unlock(&current->cred_guard_mutex);
+       mutex_unlock(&current->signal->cred_guard_mutex);
 }
 EXPORT_SYMBOL(install_exec_creds);
 
 /*
  * determine how safe it is to execute the proposed program
- * - the caller must hold current->cred_guard_mutex to protect against
+ * - the caller must hold ->cred_guard_mutex to protect against
  *   PTRACE_ATTACH
  */
 int check_unsafe_exec(struct linux_binprm *bprm)
@@ -1406,7 +1412,6 @@ int do_execve(const char * filename,
        if (retval < 0)
                goto out;
 
-       current->flags &= ~PF_KTHREAD;
        retval = search_binary_handler(bprm,regs);
        if (retval < 0)
                goto out;
@@ -1459,127 +1464,148 @@ void set_binfmt(struct linux_binfmt *new)
 
 EXPORT_SYMBOL(set_binfmt);
 
+static int expand_corename(struct core_name *cn)
+{
+       char *old_corename = cn->corename;
+
+       cn->size = CORENAME_MAX_SIZE * atomic_inc_return(&call_count);
+       cn->corename = krealloc(old_corename, cn->size, GFP_KERNEL);
+
+       if (!cn->corename) {
+               kfree(old_corename);
+               return -ENOMEM;
+       }
+
+       return 0;
+}
+
+static int cn_printf(struct core_name *cn, const char *fmt, ...)
+{
+       char *cur;
+       int need;
+       int ret;
+       va_list arg;
+
+       va_start(arg, fmt);
+       need = vsnprintf(NULL, 0, fmt, arg);
+       va_end(arg);
+
+       if (likely(need < cn->size - cn->used - 1))
+               goto out_printf;
+
+       ret = expand_corename(cn);
+       if (ret)
+               goto expand_fail;
+
+out_printf:
+       cur = cn->corename + cn->used;
+       va_start(arg, fmt);
+       vsnprintf(cur, need + 1, fmt, arg);
+       va_end(arg);
+       cn->used += need;
+       return 0;
+
+expand_fail:
+       return ret;
+}
+
 /* format_corename will inspect the pattern parameter, and output a
  * name into corename, which must have space for at least
  * CORENAME_MAX_SIZE bytes plus one byte for the zero terminator.
  */
-static int format_corename(char *corename, long signr)
+static int format_corename(struct core_name *cn, long signr)
 {
        const struct cred *cred = current_cred();
        const char *pat_ptr = core_pattern;
        int ispipe = (*pat_ptr == '|');
-       char *out_ptr = corename;
-       char *const out_end = corename + CORENAME_MAX_SIZE;
-       int rc;
        int pid_in_pattern = 0;
+       int err = 0;
+
+       cn->size = CORENAME_MAX_SIZE * atomic_read(&call_count);
+       cn->corename = kmalloc(cn->size, GFP_KERNEL);
+       cn->used = 0;
+
+       if (!cn->corename)
+               return -ENOMEM;
 
        /* Repeat as long as we have more pattern to process and more output
           space */
        while (*pat_ptr) {
                if (*pat_ptr != '%') {
-                       if (out_ptr == out_end)
+                       if (*pat_ptr == 0)
                                goto out;
-                       *out_ptr++ = *pat_ptr++;
+                       err = cn_printf(cn, "%c", *pat_ptr++);
                } else {
                        switch (*++pat_ptr) {
+                       /* single % at the end, drop that */
                        case 0:
                                goto out;
                        /* Double percent, output one percent */
                        case '%':
-                               if (out_ptr == out_end)
-                                       goto out;
-                               *out_ptr++ = '%';
+                               err = cn_printf(cn, "%c", '%');
                                break;
                        /* pid */
                        case 'p':
                                pid_in_pattern = 1;
-                               rc = snprintf(out_ptr, out_end - out_ptr,
-                                             "%d", task_tgid_vnr(current));
-                               if (rc > out_end - out_ptr)
-                                       goto out;
-                               out_ptr += rc;
+                               err = cn_printf(cn, "%d",
+                                             task_tgid_vnr(current));
                                break;
                        /* uid */
                        case 'u':
-                               rc = snprintf(out_ptr, out_end - out_ptr,
-                                             "%d", cred->uid);
-                               if (rc > out_end - out_ptr)
-                                       goto out;
-                               out_ptr += rc;
+                               err = cn_printf(cn, "%d", cred->uid);
                                break;
                        /* gid */
                        case 'g':
-                               rc = snprintf(out_ptr, out_end - out_ptr,
-                                             "%d", cred->gid);
-                               if (rc > out_end - out_ptr)
-                                       goto out;
-                               out_ptr += rc;
+                               err = cn_printf(cn, "%d", cred->gid);
                                break;
                        /* signal that caused the coredump */
                        case 's':
-                               rc = snprintf(out_ptr, out_end - out_ptr,
-                                             "%ld", signr);
-                               if (rc > out_end - out_ptr)
-                                       goto out;
-                               out_ptr += rc;
+                               err = cn_printf(cn, "%ld", signr);
                                break;
                        /* UNIX time of coredump */
                        case 't': {
                                struct timeval tv;
                                do_gettimeofday(&tv);
-                               rc = snprintf(out_ptr, out_end - out_ptr,
-                                             "%lu", tv.tv_sec);
-                               if (rc > out_end - out_ptr)
-                                       goto out;
-                               out_ptr += rc;
+                               err = cn_printf(cn, "%lu", tv.tv_sec);
                                break;
                        }
                        /* hostname */
                        case 'h':
                                down_read(&uts_sem);
-                               rc = snprintf(out_ptr, out_end - out_ptr,
-                                             "%s", utsname()->nodename);
+                               err = cn_printf(cn, "%s",
+                                             utsname()->nodename);
                                up_read(&uts_sem);
-                               if (rc > out_end - out_ptr)
-                                       goto out;
-                               out_ptr += rc;
                                break;
                        /* executable */
                        case 'e':
-                               rc = snprintf(out_ptr, out_end - out_ptr,
-                                             "%s", current->comm);
-                               if (rc > out_end - out_ptr)
-                                       goto out;
-                               out_ptr += rc;
+                               err = cn_printf(cn, "%s", current->comm);
                                break;
                        /* core limit size */
                        case 'c':
-                               rc = snprintf(out_ptr, out_end - out_ptr,
-                                             "%lu", rlimit(RLIMIT_CORE));
-                               if (rc > out_end - out_ptr)
-                                       goto out;
-                               out_ptr += rc;
+                               err = cn_printf(cn, "%lu",
+                                             rlimit(RLIMIT_CORE));
                                break;
                        default:
                                break;
                        }
                        ++pat_ptr;
                }
+
+               if (err)
+                       return err;
        }
+
        /* Backward compatibility with core_uses_pid:
         *
         * If core_pattern does not include a %p (as is the default)
         * and core_uses_pid is set, then .%pid will be appended to
         * the filename. Do not do this for piped commands. */
        if (!ispipe && !pid_in_pattern && core_uses_pid) {
-               rc = snprintf(out_ptr, out_end - out_ptr,
-                             ".%d", task_tgid_vnr(current));
-               if (rc > out_end - out_ptr)
-                       goto out;
-               out_ptr += rc;
+               err = cn_printf(cn, ".%d", task_tgid_vnr(current));
+               if (err)
+                       return err;
        }
 out:
-       *out_ptr = 0;
        return ispipe;
 }
 
@@ -1856,7 +1882,7 @@ static int umh_pipe_setup(struct subprocess_info *info)
 void do_coredump(long signr, int exit_code, struct pt_regs *regs)
 {
        struct core_state core_state;
-       char corename[CORENAME_MAX_SIZE + 1];
+       struct core_name cn;
        struct mm_struct *mm = current->mm;
        struct linux_binfmt * binfmt;
        const struct cred *old_cred;
@@ -1911,7 +1937,13 @@ void do_coredump(long signr, int exit_code, struct pt_regs *regs)
         */
        clear_thread_flag(TIF_SIGPENDING);
 
-       ispipe = format_corename(corename, signr);
+       ispipe = format_corename(&cn, signr);
+
+       if (ispipe == -ENOMEM) {
+               printk(KERN_WARNING "format_corename failed\n");
+               printk(KERN_WARNING "Aborting core\n");
+               goto fail_corename;
+       }
 
        if (ispipe) {
                int dump_count;
@@ -1948,7 +1980,7 @@ void do_coredump(long signr, int exit_code, struct pt_regs *regs)
                        goto fail_dropcount;
                }
 
-               helper_argv = argv_split(GFP_KERNEL, corename+1, NULL);
+               helper_argv = argv_split(GFP_KERNEL, cn.corename+1, NULL);
                if (!helper_argv) {
                        printk(KERN_WARNING "%s failed to allocate memory\n",
                               __func__);
@@ -1961,7 +1993,7 @@ void do_coredump(long signr, int exit_code, struct pt_regs *regs)
                argv_free(helper_argv);
                if (retval) {
                        printk(KERN_INFO "Core dump to %s pipe failed\n",
-                              corename);
+                              cn.corename);
                        goto close_fail;
                }
        } else {
@@ -1970,7 +2002,7 @@ void do_coredump(long signr, int exit_code, struct pt_regs *regs)
                if (cprm.limit < binfmt->min_coredump)
                        goto fail_unlock;
 
-               cprm.file = filp_open(corename,
+               cprm.file = filp_open(cn.corename,
                                 O_CREAT | 2 | O_NOFOLLOW | O_LARGEFILE | flag,
                                 0600);
                if (IS_ERR(cprm.file))
@@ -2012,6 +2044,8 @@ fail_dropcount:
        if (ispipe)
                atomic_dec(&core_dump_count);
 fail_unlock:
+       kfree(cn.corename);
+fail_corename:
        coredump_finish(mm);
        revert_creds(old_cred);
 fail_creds:
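
cn_printf() sizes each fragment with vsnprintf(NULL, 0, ...) before writing it and grows the buffer via expand_corename()/krealloc() when needed, so format_corename() no longer silently truncates at CORENAME_MAX_SIZE. A small userspace sketch of that measure-then-append pattern (struct and helper names are illustrative):

#include <stdarg.h>
#include <stdio.h>
#include <stdlib.h>

struct buf { char *s; size_t used, size; };

/* Append a formatted fragment, growing the buffer as needed. */
static int buf_printf(struct buf *b, const char *fmt, ...)
{
        va_list ap, ap2;
        int need;

        va_start(ap, fmt);
        va_copy(ap2, ap);
        need = vsnprintf(NULL, 0, fmt, ap);     /* measure first */
        va_end(ap);
        if (need < 0) {
                va_end(ap2);
                return -1;
        }
        if (b->used + need + 1 > b->size) {
                size_t nsize = b->size ? b->size * 2 : 64;
                char *n;

                while (b->used + need + 1 > nsize)
                        nsize *= 2;
                n = realloc(b->s, nsize);       /* stands in for krealloc() */
                if (!n) {
                        va_end(ap2);
                        return -1;
                }
                b->s = n;
                b->size = nsize;
        }
        vsnprintf(b->s + b->used, need + 1, fmt, ap2);
        va_end(ap2);
        b->used += need;
        return 0;
}

int main(void)
{
        struct buf b = { 0 };

        buf_printf(&b, "core.%d.%s", 1234, "example");
        puts(b.s);
        free(b.s);
        return 0;
}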
index f8cc34f..ecc8b39 100644 (file)
@@ -640,7 +640,7 @@ static void fasync_free_rcu(struct rcu_head *head)
  * match the state "is the filp on a fasync list".
  *
  */
-static int fasync_remove_entry(struct file *filp, struct fasync_struct **fapp)
+int fasync_remove_entry(struct file *filp, struct fasync_struct **fapp)
 {
        struct fasync_struct *fa, **fp;
        int result = 0;
@@ -666,21 +666,31 @@ static int fasync_remove_entry(struct file *filp, struct fasync_struct **fapp)
        return result;
 }
 
+struct fasync_struct *fasync_alloc(void)
+{
+       return kmem_cache_alloc(fasync_cache, GFP_KERNEL);
+}
+
 /*
- * Add a fasync entry. Return negative on error, positive if
- * added, and zero if did nothing but change an existing one.
+ * NOTE! This can be used only for unused fasync entries:
+ * entries that actually got inserted on the fasync list
+ * need to be released by rcu - see fasync_remove_entry.
+ */
+void fasync_free(struct fasync_struct *new)
+{
+       kmem_cache_free(fasync_cache, new);
+}
+
+/*
+ * Insert a new entry into the fasync list.  Return the pointer to the
+ * old one if we didn't use the new one.
  *
  * NOTE! It is very important that the FASYNC flag always
  * match the state "is the filp on a fasync list".
  */
-static int fasync_add_entry(int fd, struct file *filp, struct fasync_struct **fapp)
+struct fasync_struct *fasync_insert_entry(int fd, struct file *filp, struct fasync_struct **fapp, struct fasync_struct *new)
 {
-       struct fasync_struct *new, *fa, **fp;
-       int result = 0;
-
-       new = kmem_cache_alloc(fasync_cache, GFP_KERNEL);
-       if (!new)
-               return -ENOMEM;
+       struct fasync_struct *fa, **fp;
+       struct fasync_struct *fa, **fp;
 
        spin_lock(&filp->f_lock);
        spin_lock(&fasync_lock);
@@ -691,8 +701,6 @@ static int fasync_add_entry(int fd, struct file *filp, struct fasync_struct **fa
                spin_lock_irq(&fa->fa_lock);
                fa->fa_fd = fd;
                spin_unlock_irq(&fa->fa_lock);
-
-               kmem_cache_free(fasync_cache, new);
                goto out;
        }
 
@@ -702,13 +710,39 @@ static int fasync_add_entry(int fd, struct file *filp, struct fasync_struct **fa
        new->fa_fd = fd;
        new->fa_next = *fapp;
        rcu_assign_pointer(*fapp, new);
-       result = 1;
        filp->f_flags |= FASYNC;
 
 out:
        spin_unlock(&fasync_lock);
        spin_unlock(&filp->f_lock);
-       return result;
+       return fa;
+}
+
+/*
+ * Add a fasync entry. Return negative on error, positive if
+ * added, and zero if did nothing but change an existing one.
+ */
+static int fasync_add_entry(int fd, struct file *filp, struct fasync_struct **fapp)
+{
+       struct fasync_struct *new;
+
+       new = fasync_alloc();
+       if (!new)
+               return -ENOMEM;
+
+       /*
+        * fasync_insert_entry() returns the old (updated) entry if
+        * it existed.
+        *
+        * So free the (unused) new entry and return 0 to let the
+        * caller know that we didn't add any new fasync entries.
+        */
+       if (fasync_insert_entry(fd, filp, fapp, new)) {
+               fasync_free(new);
+               return 0;
+       }
+
+       return 1;
 }
 
 /*
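
The new fasync helpers split allocation from insertion: fasync_add_entry() allocates with fasync_alloc() before any lock is taken, inserts under the locks with fasync_insert_entry(), and frees the unused entry with fasync_free() when an existing one was updated instead. A hedged userspace sketch of that allocate-outside, insert-under-lock pattern (list and lock names are illustrative):

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct entry { int fd; struct entry *next; };

static struct entry *list_head;
static pthread_mutex_t list_lock = PTHREAD_MUTEX_INITIALIZER;

/* Returns the existing entry if one matched, or NULL if 'new' was inserted. */
static struct entry *insert_entry(int fd, struct entry *new)
{
        pthread_mutex_lock(&list_lock);
        for (struct entry *e = list_head; e; e = e->next) {
                if (e->fd == fd) {
                        pthread_mutex_unlock(&list_lock);
                        return e;               /* updated in place */
                }
        }
        new->fd = fd;
        new->next = list_head;
        list_head = new;
        pthread_mutex_unlock(&list_lock);
        return NULL;
}

static int add_entry(int fd)
{
        struct entry *new = malloc(sizeof(*new));   /* allocate before locking */

        if (!new)
                return -1;
        if (insert_entry(fd, new)) {
                free(new);                          /* existing entry reused */
                return 0;
        }
        return 1;                                   /* new entry inserted */
}

int main(void)
{
        int first = add_entry(3);
        int second = add_entry(3);

        printf("%d %d\n", first, second);           /* 1 then 0 */
        return 0;
}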
index b986642..6e07696 100644 (file)
@@ -1334,12 +1334,7 @@ out_finish:
 
 static void fuse_retrieve_end(struct fuse_conn *fc, struct fuse_req *req)
 {
-       int i;
-
-       for (i = 0; i < req->num_pages; i++) {
-               struct page *page = req->pages[i];
-               page_cache_release(page);
-       }
+       release_pages(req->pages, req->num_pages, 0);
 }
 
 static int fuse_retrieve(struct fuse_conn *fc, struct inode *inode,
index 60c2b94..79cf7f6 100644 (file)
@@ -543,6 +543,34 @@ static unsigned int isofs_get_last_session(struct super_block *sb, s32 session)
        return vol_desc_start;
 }
 
+/*
+ * Check if the root directory is empty (has fewer than 3 entries).
+ *
+ * Used to detect broken CDs where the ISO root directory is empty but the
+ * Joliet root directory is OK. If such a CD has Rock Ridge extensions, they
+ * will be disabled (and Joliet used instead), or else no files would be
+ * visible.
+ */
+static bool rootdir_empty(struct super_block *sb, unsigned long block)
+{
+       int offset = 0, files = 0, de_len;
+       struct iso_directory_record *de;
+       struct buffer_head *bh;
+
+       bh = sb_bread(sb, block);
+       if (!bh)
+               return true;
+       while (files < 3) {
+               de = (struct iso_directory_record *) (bh->b_data + offset);
+               de_len = *(unsigned char *) de;
+               if (de_len == 0)
+                       break;
+               files++;
+               offset += de_len;
+       }
+       brelse(bh);
+       return files < 3;
+}
+
 /*
  * Initialize the superblock and read the root inode.
  *
@@ -842,6 +870,18 @@ root_found:
        if (IS_ERR(inode))
                goto out_no_root;
 
+       /*
+        * Fix for broken CDs with Rock Ridge and empty ISO root directory but
+        * correct Joliet root directory.
+        */
+       if (sbi->s_rock == 1 && joliet_level &&
+                               rootdir_empty(s, sbi->s_firstdatazone)) {
+               printk(KERN_NOTICE
+                       "ISOFS: primary root directory is empty. "
+                       "Disabling Rock Ridge and switching to Joliet.");
+               sbi->s_rock = 0;
+       }
+
        /*
         * If this disk has both Rock Ridge and Joliet on it, then we
         * want to use Rock Ridge by default.  This can be overridden
index b13aabc..abfff9d 100644 (file)
@@ -22,7 +22,6 @@
 #include <linux/in.h>
 #include <linux/uio.h>
 #include <linux/smp.h>
-#include <linux/smp_lock.h>
 #include <linux/mutex.h>
 #include <linux/kthread.h>
 #include <linux/freezer.h>
@@ -130,15 +129,6 @@ lockd(void *vrqstp)
 
        dprintk("NFS locking service started (ver " LOCKD_VERSION ").\n");
 
-       /*
-        * FIXME: it would be nice if lockd didn't spend its entire life
-        * running under the BKL. At the very least, it would be good to
-        * have someone clarify what it's intended to protect here. I've
-        * seen some handwavy posts about posix locking needing to be
-        * done under the BKL, but it's far from clear.
-        */
-       lock_kernel();
-
        if (!nlm_timeout)
                nlm_timeout = LOCKD_DFLT_TIMEO;
        nlmsvc_timeout = nlm_timeout * HZ;
@@ -195,7 +185,6 @@ lockd(void *vrqstp)
        if (nlmsvc_ops)
                nlmsvc_invalidate_all();
        nlm_shutdown_hosts();
-       unlock_kernel();
        return 0;
 }
 
index 6f1ef00..c462d34 100644 (file)
@@ -700,14 +700,16 @@ nlmsvc_notify_blocked(struct file_lock *fl)
        struct nlm_block        *block;
 
        dprintk("lockd: VFS unblock notification for block %p\n", fl);
+       spin_lock(&nlm_blocked_lock);
        list_for_each_entry(block, &nlm_blocked, b_list) {
                if (nlm_compare_locks(&block->b_call->a_args.lock.fl, fl)) {
-                       nlmsvc_insert_block(block, 0);
+                       nlmsvc_insert_block_locked(block, 0);
+                       spin_unlock(&nlm_blocked_lock);
                        svc_wake_up(block->b_daemon);
                        return;
                }
        }
-
+       spin_unlock(&nlm_blocked_lock);
        printk(KERN_WARNING "lockd: notification for unknown block!\n");
 }
 
index d0ef94c..1ca0679 100644 (file)
@@ -170,6 +170,7 @@ nlm_traverse_locks(struct nlm_host *host, struct nlm_file *file,
 
 again:
        file->f_locks = 0;
+       lock_flocks(); /* protects i_flock list */
        for (fl = inode->i_flock; fl; fl = fl->fl_next) {
                if (fl->fl_lmops != &nlmsvc_lock_operations)
                        continue;
@@ -181,6 +182,7 @@ again:
                if (match(lockhost, host)) {
                        struct file_lock lock = *fl;
 
+                       unlock_flocks();
                        lock.fl_type  = F_UNLCK;
                        lock.fl_start = 0;
                        lock.fl_end   = OFFSET_MAX;
@@ -192,6 +194,7 @@ again:
                        goto again;
                }
        }
+       unlock_flocks();
 
        return 0;
 }
@@ -226,10 +229,14 @@ nlm_file_inuse(struct nlm_file *file)
        if (file->f_count || !list_empty(&file->f_blocks) || file->f_shares)
                return 1;
 
+       lock_flocks();
        for (fl = inode->i_flock; fl; fl = fl->fl_next) {
-               if (fl->fl_lmops == &nlmsvc_lock_operations)
+               if (fl->fl_lmops == &nlmsvc_lock_operations) {
+                       unlock_flocks();
                        return 1;
+               }
        }
+       unlock_flocks();
        file->f_locks = 0;
        return 0;
 }
index 4de3a26..50ec159 100644 (file)
@@ -142,6 +142,7 @@ int lease_break_time = 45;
 
 static LIST_HEAD(file_lock_list);
 static LIST_HEAD(blocked_list);
+static DEFINE_SPINLOCK(file_lock_lock);
 
 /*
  * Protects the two list heads above, plus the inode->i_flock list
@@ -149,23 +150,24 @@ static LIST_HEAD(blocked_list);
  */
 void lock_flocks(void)
 {
-       lock_kernel();
+       spin_lock(&file_lock_lock);
 }
 EXPORT_SYMBOL_GPL(lock_flocks);
 
 void unlock_flocks(void)
 {
-       unlock_kernel();
+       spin_unlock(&file_lock_lock);
 }
 EXPORT_SYMBOL_GPL(unlock_flocks);
 
 static struct kmem_cache *filelock_cache __read_mostly;
 
 /* Allocate an empty lock structure. */
-static struct file_lock *locks_alloc_lock(void)
+struct file_lock *locks_alloc_lock(void)
 {
        return kmem_cache_alloc(filelock_cache, GFP_KERNEL);
 }
+EXPORT_SYMBOL_GPL(locks_alloc_lock);
 
 void locks_release_private(struct file_lock *fl)
 {
@@ -1365,7 +1367,6 @@ int fcntl_getlease(struct file *filp)
 int generic_setlease(struct file *filp, long arg, struct file_lock **flp)
 {
        struct file_lock *fl, **before, **my_before = NULL, *lease;
-       struct file_lock *new_fl = NULL;
        struct dentry *dentry = filp->f_path.dentry;
        struct inode *inode = dentry->d_inode;
        int error, rdlease_count = 0, wrlease_count = 0;
@@ -1385,11 +1386,6 @@ int generic_setlease(struct file *filp, long arg, struct file_lock **flp)
        lease = *flp;
 
        if (arg != F_UNLCK) {
-               error = -ENOMEM;
-               new_fl = locks_alloc_lock();
-               if (new_fl == NULL)
-                       goto out;
-
                error = -EAGAIN;
                if ((arg == F_RDLCK) && (atomic_read(&inode->i_writecount) > 0))
                        goto out;
@@ -1434,7 +1430,6 @@ int generic_setlease(struct file *filp, long arg, struct file_lock **flp)
                goto out;
        }
 
-       error = 0;
        if (arg == F_UNLCK)
                goto out;
 
@@ -1442,15 +1437,11 @@ int generic_setlease(struct file *filp, long arg, struct file_lock **flp)
        if (!leases_enable)
                goto out;
 
-       locks_copy_lock(new_fl, lease);
-       locks_insert_lock(before, new_fl);
-
-       *flp = new_fl;
+       locks_insert_lock(before, lease);
        return 0;
 
 out:
-       if (new_fl != NULL)
-               locks_free_lock(new_fl);
+       locks_free_lock(lease);
        return error;
 }
 EXPORT_SYMBOL(generic_setlease);
@@ -1514,26 +1505,38 @@ EXPORT_SYMBOL_GPL(vfs_setlease);
  */
 int fcntl_setlease(unsigned int fd, struct file *filp, long arg)
 {
-       struct file_lock fl, *flp = &fl;
+       struct file_lock *fl;
+       struct fasync_struct *new;
        struct inode *inode = filp->f_path.dentry->d_inode;
        int error;
 
-       locks_init_lock(&fl);
-       error = lease_init(filp, arg, &fl);
-       if (error)
-               return error;
+       fl = lease_alloc(filp, arg);
+       if (IS_ERR(fl))
+               return PTR_ERR(fl);
 
+       new = fasync_alloc();
+       if (!new) {
+               locks_free_lock(fl);
+               return -ENOMEM;
+       }
        lock_flocks();
-
-       error = __vfs_setlease(filp, arg, &flp);
+       error = __vfs_setlease(filp, arg, &fl);
        if (error || arg == F_UNLCK)
                goto out_unlock;
 
-       error = fasync_helper(fd, filp, 1, &flp->fl_fasync);
+       /*
+        * fasync_insert_entry() returns the old entry if any.
+        * If there was no old entry, then it used 'new' and
+        * inserted it into the fasync list. Clear new so that
+        * we don't release it here.
+        */
+       if (!fasync_insert_entry(fd, filp, &fl->fl_fasync, new))
+               new = NULL;
+
        if (error < 0) {
                /* remove lease just inserted by setlease */
-               flp->fl_type = F_UNLCK | F_INPROGRESS;
-               flp->fl_break_time = jiffies - 10;
+               fl->fl_type = F_UNLCK | F_INPROGRESS;
+               fl->fl_break_time = jiffies - 10;
                time_out_leases(inode);
                goto out_unlock;
        }
@@ -1541,6 +1544,8 @@ int fcntl_setlease(unsigned int fd, struct file *filp, long arg)
        error = __f_setown(filp, task_pid(current), PIDTYPE_PID, 0);
 out_unlock:
        unlock_flocks();
+       if (new)
+               fasync_free(new);
        return error;
 }
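
Since callers only reach the lock through lock_flocks()/unlock_flocks(), the implementation can move from the BKL to a private spinlock without touching any caller; and because a spinlock section must not sleep, fcntl_setlease() above now allocates its file_lock and fasync entry before taking it. A tiny sketch of hiding a lock behind accessor functions (the userspace primitive here is illustrative):

#include <pthread.h>
#include <stdio.h>

/* Private lock; callers never see the primitive behind the accessors,
 * so it can be swapped (e.g. big lock -> per-subsystem lock) in one place. */
static pthread_mutex_t file_lock_lock = PTHREAD_MUTEX_INITIALIZER;

static void lock_flocks_sketch(void)   { pthread_mutex_lock(&file_lock_lock); }
static void unlock_flocks_sketch(void) { pthread_mutex_unlock(&file_lock_lock); }

int main(void)
{
        lock_flocks_sketch();
        puts("critical section over the file-lock lists");
        unlock_flocks_sketch();
        return 0;
}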
 
index fd66765..ba30665 100644 (file)
@@ -1,7 +1,6 @@
 config NFS_FS
        tristate "NFS client support"
        depends on INET && FILE_LOCKING
-       depends on BKL # fix as soon as lockd is done
        select LOCKD
        select SUNRPC
        select NFS_ACL_SUPPORT if NFS_V3_ACL
index 31a78fc..18b3e89 100644 (file)
@@ -2,7 +2,6 @@ config NFSD
        tristate "NFS server support"
        depends on INET
        depends on FILE_LOCKING
-       depends on BKL # fix as soon as lockd is done
        select LOCKD
        select SUNRPC
        select EXPORTFS
index 9019e8e..56347e0 100644 (file)
@@ -2614,7 +2614,7 @@ nfs4_open_delegation(struct svc_fh *fh, struct nfsd4_open *open, struct nfs4_sta
        struct nfs4_delegation *dp;
        struct nfs4_stateowner *sop = stp->st_stateowner;
        int cb_up = atomic_read(&sop->so_client->cl_cb_set);
-       struct file_lock fl, *flp = &fl;
+       struct file_lock *fl;
        int status, flag = 0;
 
        flag = NFS4_OPEN_DELEGATE_NONE;
@@ -2648,20 +2648,24 @@ nfs4_open_delegation(struct svc_fh *fh, struct nfsd4_open *open, struct nfs4_sta
                flag = NFS4_OPEN_DELEGATE_NONE;
                goto out;
        }
-       locks_init_lock(&fl);
-       fl.fl_lmops = &nfsd_lease_mng_ops;
-       fl.fl_flags = FL_LEASE;
-       fl.fl_type = flag == NFS4_OPEN_DELEGATE_READ? F_RDLCK: F_WRLCK;
-       fl.fl_end = OFFSET_MAX;
-       fl.fl_owner =  (fl_owner_t)dp;
-       fl.fl_file = find_readable_file(stp->st_file);
-       BUG_ON(!fl.fl_file);
-       fl.fl_pid = current->tgid;
+       status = -ENOMEM;
+       fl = locks_alloc_lock();
+       if (!fl)
+               goto out;
+       locks_init_lock(fl);
+       fl->fl_lmops = &nfsd_lease_mng_ops;
+       fl->fl_flags = FL_LEASE;
+       fl->fl_type = flag == NFS4_OPEN_DELEGATE_READ? F_RDLCK: F_WRLCK;
+       fl->fl_end = OFFSET_MAX;
+       fl->fl_owner =  (fl_owner_t)dp;
+       fl->fl_file = find_readable_file(stp->st_file);
+       BUG_ON(!fl->fl_file);
+       fl->fl_pid = current->tgid;
 
        /* vfs_setlease checks to see if delegation should be handed out.
         * the lock_manager callbacks fl_mylease and fl_change are used
         */
-       if ((status = vfs_setlease(fl.fl_file, fl.fl_type, &flp))) {
+       if ((status = vfs_setlease(fl->fl_file, fl->fl_type, &fl))) {
                dprintk("NFSD: setlease failed [%d], no delegation\n", status);
                unhash_delegation(dp);
                flag = NFS4_OPEN_DELEGATE_NONE;
index 9b094c1..f3d02ca 100644 (file)
@@ -226,7 +226,7 @@ struct mm_struct *mm_for_maps(struct task_struct *task)
 {
        struct mm_struct *mm;
 
-       if (mutex_lock_killable(&task->cred_guard_mutex))
+       if (mutex_lock_killable(&task->signal->cred_guard_mutex))
                return NULL;
 
        mm = get_task_mm(task);
@@ -235,7 +235,7 @@ struct mm_struct *mm_for_maps(struct task_struct *task)
                mmput(mm);
                mm = NULL;
        }
-       mutex_unlock(&task->cred_guard_mutex);
+       mutex_unlock(&task->signal->cred_guard_mutex);
 
        return mm;
 }
@@ -2354,14 +2354,14 @@ static ssize_t proc_pid_attr_write(struct file * file, const char __user * buf,
                goto out_free;
 
        /* Guard against adverse ptrace interaction */
-       length = mutex_lock_interruptible(&task->cred_guard_mutex);
+       length = mutex_lock_interruptible(&task->signal->cred_guard_mutex);
        if (length < 0)
                goto out_free;
 
        length = security_setprocattr(task,
                                      (char*)file->f_path.dentry->d_name.name,
                                      (void*)page, count);
-       mutex_unlock(&task->cred_guard_mutex);
+       mutex_unlock(&task->signal->cred_guard_mutex);
 out_free:
        free_page((unsigned long) page);
 out:
index 1807c24..3799473 100644 (file)
@@ -10,13 +10,13 @@ static int show_softirqs(struct seq_file *p, void *v)
 {
        int i, j;
 
-       seq_printf(p, "                ");
+       seq_printf(p, "                    ");
        for_each_possible_cpu(i)
                seq_printf(p, "CPU%-8d", i);
        seq_printf(p, "\n");
 
        for (i = 0; i < NR_SOFTIRQS; i++) {
-               seq_printf(p, "%8s:", softirq_to_name[i]);
+               seq_printf(p, "%12s:", softirq_to_name[i]);
                for_each_possible_cpu(j)
                        seq_printf(p, " %10u", kstat_softirqs_cpu(i, j));
                seq_printf(p, "\n");
index bf31b03..e15a19c 100644 (file)
@@ -31,7 +31,6 @@ static int show_stat(struct seq_file *p, void *v)
        u64 sum_softirq = 0;
        unsigned int per_softirq_sums[NR_SOFTIRQS] = {0};
        struct timespec boottime;
-       unsigned int per_irq_sum;
 
        user = nice = system = idle = iowait =
                irq = softirq = steal = cputime64_zero;
@@ -52,9 +51,7 @@ static int show_stat(struct seq_file *p, void *v)
                guest = cputime64_add(guest, kstat_cpu(i).cpustat.guest);
                guest_nice = cputime64_add(guest_nice,
                        kstat_cpu(i).cpustat.guest_nice);
-               for_each_irq_nr(j) {
-                       sum += kstat_irqs_cpu(j, i);
-               }
+               sum += kstat_cpu_irqs_sum(i);
                sum += arch_irq_stat_cpu(i);
 
                for (j = 0; j < NR_SOFTIRQS; j++) {
@@ -110,13 +107,8 @@ static int show_stat(struct seq_file *p, void *v)
        seq_printf(p, "intr %llu", (unsigned long long)sum);
 
        /* sum again ? it could be updated? */
-       for_each_irq_nr(j) {
-               per_irq_sum = 0;
-               for_each_possible_cpu(i)
-                       per_irq_sum += kstat_irqs_cpu(j, i);
-
-               seq_printf(p, " %u", per_irq_sum);
-       }
+       for_each_irq_nr(j)
+               seq_printf(p, " %u", kstat_irqs(j));
 
        seq_printf(p,
                "\nctxt %llu\n"
index 871e25e..da6b01d 100644 (file)
@@ -327,6 +327,7 @@ struct mem_size_stats {
        unsigned long private_clean;
        unsigned long private_dirty;
        unsigned long referenced;
+       unsigned long anonymous;
        unsigned long swap;
        u64 pss;
 };
@@ -357,6 +358,9 @@ static int smaps_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end,
                if (!page)
                        continue;
 
+               if (PageAnon(page))
+                       mss->anonymous += PAGE_SIZE;
+
                mss->resident += PAGE_SIZE;
                /* Accumulate the size in pages that have been accessed. */
                if (pte_young(ptent) || PageReferenced(page))
@@ -410,6 +414,7 @@ static int show_smap(struct seq_file *m, void *v)
                   "Private_Clean:  %8lu kB\n"
                   "Private_Dirty:  %8lu kB\n"
                   "Referenced:     %8lu kB\n"
+                  "Anonymous:      %8lu kB\n"
                   "Swap:           %8lu kB\n"
                   "KernelPageSize: %8lu kB\n"
                   "MMUPageSize:    %8lu kB\n",
@@ -421,6 +426,7 @@ static int show_smap(struct seq_file *m, void *v)
                   mss.private_clean >> 10,
                   mss.private_dirty >> 10,
                   mss.referenced >> 10,
+                  mss.anonymous >> 10,
                   mss.swap >> 10,
                   vma_kernel_pagesize(vma) >> 10,
                   vma_mmu_pagesize(vma) >> 10);
index 500a669..b7b10aa 100644 (file)
@@ -67,7 +67,7 @@ static long __estimate_accuracy(struct timespec *tv)
        return slack;
 }
 
-static long estimate_accuracy(struct timespec *tv)
+long select_estimate_accuracy(struct timespec *tv)
 {
        unsigned long ret;
        struct timespec now;
@@ -417,7 +417,7 @@ int do_select(int n, fd_set_bits *fds, struct timespec *end_time)
        }
 
        if (end_time && !timed_out)
-               slack = estimate_accuracy(end_time);
+               slack = select_estimate_accuracy(end_time);
 
        retval = 0;
        for (;;) {
@@ -769,7 +769,7 @@ static int do_poll(unsigned int nfds,  struct poll_list *list,
        }
 
        if (end_time && !timed_out)
-               slack = estimate_accuracy(end_time);
+               slack = select_estimate_accuracy(end_time);
 
        for (;;) {
                struct poll_list *walk;
index ca0f239..2bcc5c7 100644 (file)
@@ -33,10 +33,10 @@ typedef u64 cputime64_t;
 
 
 /*
- * Convert cputime to milliseconds and back.
+ * Convert cputime to microseconds and back.
  */
-#define cputime_to_msecs(__ct)         jiffies_to_msecs(__ct)
-#define msecs_to_cputime(__msecs)      msecs_to_jiffies(__msecs)
+#define cputime_to_usecs(__ct)         jiffies_to_usecs(__ct)
+#define usecs_to_cputime(__usecs)      usecs_to_jiffies(__usecs)
 
 /*
  * Convert cputime to seconds and back.
index 8ca18e2..ff5c660 100644 (file)
@@ -210,7 +210,7 @@ extern void gpio_unexport(unsigned gpio);
 
 #endif /* CONFIG_GPIO_SYSFS */
 
-#else  /* !CONFIG_HAVE_GPIO_LIB */
+#else  /* !CONFIG_GPIOLIB */
 
 static inline int gpio_is_valid(int number)
 {
@@ -239,7 +239,7 @@ static inline void gpio_set_value_cansleep(unsigned gpio, int value)
        gpio_set_value(gpio, value);
 }
 
-#endif /* !CONFIG_HAVE_GPIO_LIB */
+#endif /* !CONFIG_GPIOLIB */
 
 #ifndef CONFIG_GPIO_SYSFS
 
diff --git a/include/linux/basic_mmio_gpio.h b/include/linux/basic_mmio_gpio.h
new file mode 100644 (file)
index 0000000..198087a
--- /dev/null
@@ -0,0 +1,20 @@
+/*
+ * Basic memory-mapped GPIO controllers.
+ *
+ * Copyright 2008 MontaVista Software, Inc.
+ * Copyright 2008,2010 Anton Vorontsov <cbouatmailru@gmail.com>
+ *
+ * This program is free software; you can redistribute  it and/or modify it
+ * under  the terms of  the GNU General  Public License as published by the
+ * Free Software Foundation;  either version 2 of the  License, or (at your
+ * option) any later version.
+ */
+
+#ifndef __BASIC_MMIO_GPIO_H
+#define __BASIC_MMIO_GPIO_H
+
+struct bgpio_pdata {
+       int base;
+};
+
+#endif /* __BASIC_MMIO_GPIO_H */
index 709dfb9..ed4ba11 100644 (file)
@@ -154,6 +154,10 @@ enum {
         * A thread in rmdir() is waiting for this cgroup.
         */
        CGRP_WAIT_ON_RMDIR,
+       /*
+        * Clone cgroup values when creating a new child cgroup
+        */
+       CGRP_CLONE_CHILDREN,
 };
 
 /* which pidlist file are we talking about? */
index 3a779ff..7e8ca75 100644 (file)
@@ -88,12 +88,6 @@ struct cn_queue_dev {
        unsigned char name[CN_CBQ_NAMELEN];
 
        struct workqueue_struct *cn_queue;
-       /* Sent to kevent to create cn_queue only when needed */
-       struct work_struct wq_creation;
-       /* Tell if the wq_creation job is pending/completed */
-       atomic_t wq_requested;
-       /* Wait for cn_queue to be created */
-       wait_queue_head_t wq_created;
 
        struct list_head queue_list;
        spinlock_t queue_lock;
@@ -141,8 +135,6 @@ int cn_netlink_send(struct cn_msg *, u32, gfp_t);
 int cn_queue_add_callback(struct cn_queue_dev *dev, char *name, struct cb_id *id, void (*callback)(struct cn_msg *, struct netlink_skb_parms *));
 void cn_queue_del_callback(struct cn_queue_dev *dev, struct cb_id *id);
 
-int queue_cn_work(struct cn_callback_entry *cbq, struct work_struct *work);
-
 struct cn_queue_dev *cn_queue_alloc_dev(char *name, struct sock *);
 void cn_queue_free_dev(struct cn_queue_dev *dev);
 
index f0268de..7fca3dc 100644 (file)
@@ -931,6 +931,8 @@ static inline struct apertures_struct *alloc_apertures(unsigned int max_num) {
 #define fb_writel sbus_writel
 #define fb_writeq sbus_writeq
 #define fb_memset sbus_memset_io
+#define fb_memcpy_fromfb sbus_memcpy_fromio
+#define fb_memcpy_tofb sbus_memcpy_toio
 
 #elif defined(__i386__) || defined(__alpha__) || defined(__x86_64__) || defined(__hppa__) || defined(__sh__) || defined(__powerpc__) || defined(__avr32__) || defined(__bfin__)
 
@@ -943,6 +945,8 @@ static inline struct apertures_struct *alloc_apertures(unsigned int max_num) {
 #define fb_writel __raw_writel
 #define fb_writeq __raw_writeq
 #define fb_memset memset_io
+#define fb_memcpy_fromfb memcpy_fromio
+#define fb_memcpy_tofb memcpy_toio
 
 #else
 
@@ -955,6 +959,8 @@ static inline struct apertures_struct *alloc_apertures(unsigned int max_num) {
 #define fb_writel(b,addr) (*(volatile u32 *) (addr) = (b))
 #define fb_writeq(b,addr) (*(volatile u64 *) (addr) = (b))
 #define fb_memset memset
+#define fb_memcpy_fromfb memcpy
+#define fb_memcpy_tofb memcpy
 
 #endif
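
With fb_memcpy_fromfb()/fb_memcpy_tofb() defined per architecture (sbus_memcpy_*_io, memcpy_fromio/memcpy_toio, or plain memcpy), fb_read()/fb_write() can copy each chunk with a single call instead of a word loop plus a byte tail. A rough userspace sketch of the chunked copy loop they sit in (buffer size and names are illustrative):

#include <stdio.h>
#include <string.h>

#define CHUNK 4096   /* stands in for the bounce-buffer size in fb_write() */

/* Copy 'count' bytes into 'dst' in CHUNK-sized pieces through 'buffer'. */
static size_t copy_chunked(unsigned char *dst, const unsigned char *src,
                           size_t count)
{
        unsigned char buffer[CHUNK];
        size_t done = 0;

        while (count) {
                size_t c = count > CHUNK ? CHUNK : count;

                memcpy(buffer, src, c);   /* stands in for copy_from_user() */
                memcpy(dst, buffer, c);   /* stands in for fb_memcpy_tofb() */
                dst += c;
                src += c;
                done += c;
                count -= c;
        }
        return done;
}

int main(void)
{
        unsigned char in[10000], out[10000];

        memset(in, 0xAB, sizeof(in));
        printf("copied %zu bytes\n", copy_chunked(out, in, sizeof(in)));
        return 0;
}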
 
index 240eb1d..b2a6009 100644 (file)
@@ -1122,6 +1122,7 @@ extern int fcntl_getlease(struct file *filp);
 
 /* fs/locks.c */
 extern void locks_init_lock(struct file_lock *);
+extern struct file_lock * locks_alloc_lock(void);
 extern void locks_copy_lock(struct file_lock *, struct file_lock *);
 extern void __locks_copy_lock(struct file_lock *, const struct file_lock *);
 extern void locks_remove_posix(struct file *, fl_owner_t);
@@ -1310,6 +1311,11 @@ struct fasync_struct {
 
 /* SMP safe fasync helpers: */
 extern int fasync_helper(int, struct file *, int, struct fasync_struct **);
+extern struct fasync_struct *fasync_insert_entry(int, struct file *, struct fasync_struct **, struct fasync_struct *);
+extern int fasync_remove_entry(struct file *, struct fasync_struct **);
+extern struct fasync_struct *fasync_alloc(void);
+extern void fasync_free(struct fasync_struct *);
+
 /* can be called from interrupts */
 extern void kill_fasync(struct fasync_struct **, int, int);
 
index 8a85ec1..e913819 100644 (file)
@@ -37,27 +37,6 @@ extern unsigned long totalhigh_pages;
 
 void kmap_flush_unused(void);
 
-DECLARE_PER_CPU(int, __kmap_atomic_idx);
-
-static inline int kmap_atomic_idx_push(void)
-{
-       int idx = __get_cpu_var(__kmap_atomic_idx)++;
-#ifdef CONFIG_DEBUG_HIGHMEM
-       WARN_ON_ONCE(in_irq() && !irqs_disabled());
-       BUG_ON(idx > KM_TYPE_NR);
-#endif
-       return idx;
-}
-
-static inline int kmap_atomic_idx_pop(void)
-{
-       int idx = --__get_cpu_var(__kmap_atomic_idx);
-#ifdef CONFIG_DEBUG_HIGHMEM
-       BUG_ON(idx < 0);
-#endif
-       return idx;
-}
-
 #else /* CONFIG_HIGHMEM */
 
 static inline unsigned int nr_free_highpages(void) { return 0; }
@@ -95,6 +74,36 @@ static inline void __kunmap_atomic(void *addr)
 
 #endif /* CONFIG_HIGHMEM */
 
+#if defined(CONFIG_HIGHMEM) || defined(CONFIG_X86_32)
+
+DECLARE_PER_CPU(int, __kmap_atomic_idx);
+
+static inline int kmap_atomic_idx_push(void)
+{
+       int idx = __get_cpu_var(__kmap_atomic_idx)++;
+#ifdef CONFIG_DEBUG_HIGHMEM
+       WARN_ON_ONCE(in_irq() && !irqs_disabled());
+       BUG_ON(idx > KM_TYPE_NR);
+#endif
+       return idx;
+}
+
+static inline int kmap_atomic_idx(void)
+{
+       return __get_cpu_var(__kmap_atomic_idx) - 1;
+}
+
+static inline int kmap_atomic_idx_pop(void)
+{
+       int idx = --__get_cpu_var(__kmap_atomic_idx);
+#ifdef CONFIG_DEBUG_HIGHMEM
+       BUG_ON(idx < 0);
+#endif
+       return idx;
+}
+
+#endif
+
 /*
  * Make both: kmap_atomic(page, idx) and kmap_atomic(page) work.
  */
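
kmap_atomic_idx_push(), the new kmap_atomic_idx() and kmap_atomic_idx_pop() keep a small per-CPU counter of nested atomic mappings, now shared with the 32-bit x86 iomap code. A userspace analogue using a thread-local counter in place of the per-CPU variable (the limit and names are illustrative):

#include <assert.h>
#include <stdio.h>

#define KM_TYPE_NR 20                 /* illustrative nesting limit */

static _Thread_local int kmap_idx;    /* per-thread stands in for per-CPU */

static int idx_push(void) { assert(kmap_idx < KM_TYPE_NR); return kmap_idx++; }
static int idx_top(void)  { return kmap_idx - 1; }
static int idx_pop(void)  { int i = --kmap_idx; assert(i >= 0); return i; }

int main(void)
{
        int a = idx_push();           /* outer atomic mapping gets slot 0 */
        int b = idx_push();           /* nested mapping gets slot 1 */

        printf("top=%d\n", idx_top());
        idx_pop();                    /* release in LIFO order */
        idx_pop();
        printf("%d %d\n", a, b);
        return 0;
}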
index 269181b..3c5d6b6 100644 (file)
 
 #define ADP5588_DEVICE_ID_MASK 0xF
 
+/* Configuration Register 1 */
+#define ADP5588_AUTO_INC       (1 << 7)
+#define ADP5588_GPIEM_CFG      (1 << 6)
+#define ADP5588_INT_CFG                (1 << 4)
+#define ADP5588_GPI_IEN                (1 << 1)
+
+/* Interrupt Status Register */
+#define ADP5588_GPI_INT                (1 << 1)
+#define ADP5588_KE_INT         (1 << 0)
+
+#define ADP5588_MAXGPIO                18
+#define ADP5588_BANK(offs)     ((offs) >> 3)
+#define ADP5588_BIT(offs)      (1u << ((offs) & 0x7))
+
 /* Put one of these structures in i2c_board_info platform_data */
 
 #define ADP5588_KEYMAPSIZE     80
@@ -126,9 +140,12 @@ struct adp5588_kpad_platform_data {
        const struct adp5588_gpio_platform_data *gpio_data;
 };
 
+struct i2c_client; /* forward declaration */
+
 struct adp5588_gpio_platform_data {
-       unsigned gpio_start;            /* GPIO Chip base # */
-       unsigned pullup_dis_mask;       /* Pull-Up Disable Mask */
+       int gpio_start;         /* GPIO Chip base # */
+       unsigned irq_base;      /* interrupt base # */
+       unsigned pullup_dis_mask; /* Pull-Up Disable Mask */
        int     (*setup)(struct i2c_client *client,
                                int gpio, unsigned ngpio,
                                void *context);
index 2fea6c8..1f8c06c 100644 (file)
@@ -29,6 +29,8 @@ extern struct fs_struct init_fs;
                .running = 0,                                           \
                .lock = __SPIN_LOCK_UNLOCKED(sig.cputimer.lock),        \
        },                                                              \
+       .cred_guard_mutex =                                             \
+                __MUTEX_INITIALIZER(sig.cred_guard_mutex),             \
 }
 
 extern struct nsproxy init_nsproxy;
@@ -145,8 +147,6 @@ extern struct cred init_cred;
        .group_leader   = &tsk,                                         \
        RCU_INIT_POINTER(.real_cred, &init_cred),                       \
        RCU_INIT_POINTER(.cred, &init_cred),                            \
-       .cred_guard_mutex =                                             \
-                __MUTEX_INITIALIZER(tsk.cred_guard_mutex),             \
        .comm           = "swapper",                                    \
        .thread         = INIT_THREAD,                                  \
        .fs             = &init_fs,                                     \
index 01b2816..79d0c4f 100644 (file)
@@ -410,7 +410,7 @@ extern void open_softirq(int nr, void (*action)(struct softirq_action *));
 extern void softirq_init(void);
 static inline void __raise_softirq_irqoff(unsigned int nr)
 {
-       trace_softirq_raise((struct softirq_action *)(unsigned long)nr, NULL);
+       trace_softirq_raise(nr);
        or_softirq_pending(1UL << nr);
 }
 
index c059044..ad54c84 100644 (file)
@@ -33,6 +33,7 @@ struct kernel_stat {
 #ifndef CONFIG_GENERIC_HARDIRQS
        unsigned int irqs[NR_IRQS];
 #endif
+       unsigned long irqs_sum;
        unsigned int softirqs[NR_SOFTIRQS];
 };
 
@@ -54,6 +55,7 @@ static inline void kstat_incr_irqs_this_cpu(unsigned int irq,
                                            struct irq_desc *desc)
 {
        kstat_this_cpu.irqs[irq]++;
+       kstat_this_cpu.irqs_sum++;
 }
 
 static inline unsigned int kstat_irqs_cpu(unsigned int irq, int cpu)
@@ -65,8 +67,9 @@ static inline unsigned int kstat_irqs_cpu(unsigned int irq, int cpu)
 extern unsigned int kstat_irqs_cpu(unsigned int irq, int cpu);
 #define kstat_irqs_this_cpu(DESC) \
        ((DESC)->kstat_irqs[smp_processor_id()])
-#define kstat_incr_irqs_this_cpu(irqno, DESC) \
-       ((DESC)->kstat_irqs[smp_processor_id()]++)
+#define kstat_incr_irqs_this_cpu(irqno, DESC) do {\
+       ((DESC)->kstat_irqs[smp_processor_id()]++);\
+       kstat_this_cpu.irqs_sum++; } while (0)
 
 #endif
 
@@ -83,6 +86,7 @@ static inline unsigned int kstat_softirqs_cpu(unsigned int irq, int cpu)
 /*
  * Number of interrupts per specific IRQ source, since bootup
  */
+#ifndef CONFIG_GENERIC_HARDIRQS
 static inline unsigned int kstat_irqs(unsigned int irq)
 {
        unsigned int sum = 0;
@@ -93,7 +97,17 @@ static inline unsigned int kstat_irqs(unsigned int irq)
 
        return sum;
 }
+#else
+extern unsigned int kstat_irqs(unsigned int irq);
+#endif
 
+/*
+ * Number of interrupts per cpu, since bootup
+ */
+static inline unsigned int kstat_cpu_irqs_sum(unsigned int cpu)
+{
+       return kstat_cpu(cpu).irqs_sum;
+}
 
 /*
  * Lock/unlock the current runqueue - to extract task statistics:
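
Bumping irqs_sum at accounting time lets /proc/stat read a per-CPU total in O(1) instead of summing every IRQ on every CPU. A small sketch of keeping the running sum next to the per-source counters (sizes and names are illustrative):

#include <stdio.h>

#define NR_IRQS 8

struct kstat_sketch {
        unsigned int irqs[NR_IRQS];   /* per-source counters */
        unsigned long irqs_sum;       /* running total, updated on increment */
};

static void account_irq(struct kstat_sketch *k, unsigned int irq)
{
        k->irqs[irq]++;
        k->irqs_sum++;                /* O(1) total instead of summing later */
}

int main(void)
{
        struct kstat_sketch k = { { 0 }, 0 };

        account_irq(&k, 3);
        account_irq(&k, 3);
        account_irq(&k, 5);
        printf("sum=%lu irq3=%u\n", k.irqs_sum, k.irqs[3]);
        return 0;
}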
index c238ad2..10308c6 100644 (file)
@@ -171,8 +171,17 @@ struct kfifo_rec_ptr_2 __STRUCT_KFIFO_PTR(unsigned char, 2, void);
        }
 
 
-/* __kfifo_must_check_helper() is temporarily disabled because it was faulty */
-#define __kfifo_must_check_helper(x) (x)
+static inline unsigned int __must_check
+__kfifo_uint_must_check_helper(unsigned int val)
+{
+       return val;
+}
+
+static inline int __must_check
+__kfifo_int_must_check_helper(int val)
+{
+       return val;
+}
 
 /**
  * kfifo_initialized - Check if the fifo is initialized
@@ -264,7 +273,7 @@ struct kfifo_rec_ptr_2 __STRUCT_KFIFO_PTR(unsigned char, 2, void);
  * @fifo: address of the fifo to be used
  */
 #define        kfifo_avail(fifo) \
-__kfifo_must_check_helper( \
+__kfifo_uint_must_check_helper( \
 ({ \
        typeof((fifo) + 1) __tmpq = (fifo); \
        const size_t __recsize = sizeof(*__tmpq->rectype); \
@@ -297,7 +306,7 @@ __kfifo_must_check_helper( \
  * This function returns the size of the next fifo record in number of bytes.
  */
 #define kfifo_peek_len(fifo) \
-__kfifo_must_check_helper( \
+__kfifo_uint_must_check_helper( \
 ({ \
        typeof((fifo) + 1) __tmp = (fifo); \
        const size_t __recsize = sizeof(*__tmp->rectype); \
@@ -320,7 +329,7 @@ __kfifo_must_check_helper( \
  * Return 0 if no error, otherwise an error code.
  */
 #define kfifo_alloc(fifo, size, gfp_mask) \
-__kfifo_must_check_helper( \
+__kfifo_int_must_check_helper( \
 ({ \
        typeof((fifo) + 1) __tmp = (fifo); \
        struct __kfifo *__kfifo = &__tmp->kfifo; \
@@ -416,7 +425,7 @@ __kfifo_must_check_helper( \
  * writer, you don't need extra locking to use these macro.
  */
 #define        kfifo_get(fifo, val) \
-__kfifo_must_check_helper( \
+__kfifo_uint_must_check_helper( \
 ({ \
        typeof((fifo) + 1) __tmp = (fifo); \
        typeof((val) + 1) __val = (val); \
@@ -457,7 +466,7 @@ __kfifo_must_check_helper( \
  * writer, you don't need extra locking to use these macro.
  */
 #define        kfifo_peek(fifo, val) \
-__kfifo_must_check_helper( \
+__kfifo_uint_must_check_helper( \
 ({ \
        typeof((fifo) + 1) __tmp = (fifo); \
        typeof((val) + 1) __val = (val); \
@@ -549,7 +558,7 @@ __kfifo_must_check_helper( \
  * writer, you don't need extra locking to use these macro.
  */
 #define        kfifo_out(fifo, buf, n) \
-__kfifo_must_check_helper( \
+__kfifo_uint_must_check_helper( \
 ({ \
        typeof((fifo) + 1) __tmp = (fifo); \
        typeof((buf) + 1) __buf = (buf); \
@@ -577,7 +586,7 @@ __kfifo_must_check_helper( \
  * copied.
  */
 #define        kfifo_out_spinlocked(fifo, buf, n, lock) \
-__kfifo_must_check_helper( \
+__kfifo_uint_must_check_helper( \
 ({ \
        unsigned long __flags; \
        unsigned int __ret; \
@@ -606,7 +615,7 @@ __kfifo_must_check_helper( \
  * writer, you don't need extra locking to use these macro.
  */
 #define        kfifo_from_user(fifo, from, len, copied) \
-__kfifo_must_check_helper( \
+__kfifo_uint_must_check_helper( \
 ({ \
        typeof((fifo) + 1) __tmp = (fifo); \
        const void __user *__from = (from); \
@@ -634,7 +643,7 @@ __kfifo_must_check_helper( \
  * writer, you don't need extra locking to use these macro.
  */
 #define        kfifo_to_user(fifo, to, len, copied) \
-__kfifo_must_check_helper( \
+__kfifo_uint_must_check_helper( \
 ({ \
        typeof((fifo) + 1) __tmp = (fifo); \
        void __user *__to = (to); \
@@ -761,7 +770,7 @@ __kfifo_must_check_helper( \
  * writer, you don't need extra locking to use these macro.
  */
 #define        kfifo_out_peek(fifo, buf, n) \
-__kfifo_must_check_helper( \
+__kfifo_uint_must_check_helper( \
 ({ \
        typeof((fifo) + 1) __tmp = (fifo); \
        typeof((buf) + 1) __buf = (buf); \
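
A macro result cannot carry __must_check itself, so kfifo routes each expression through a typed static inline identity helper; having separate int and unsigned int helpers preserves the sign of the wrapped value, which the old catch-all helper did not. A minimal sketch of the trick using GCC/Clang's warn_unused_result (names are illustrative):

#include <stdio.h>

static inline unsigned int __attribute__((warn_unused_result))
uint_must_check(unsigned int val)
{
        return val;
}

/* Wrap the macro body so ignoring its result triggers a warning. */
#define queue_avail(q) uint_must_check((q)->size - (q)->used)

struct queue { unsigned int size, used; };

int main(void)
{
        struct queue q = { .size = 16, .used = 3 };
        unsigned int n = queue_avail(&q);   /* result consumed: no warning */

        printf("%u\n", n);
        /* queue_avail(&q); */              /* would warn: result ignored   */
        return 0;
}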
index fcd3dda..072652d 100644 (file)
@@ -585,15 +585,15 @@ static inline void rps_reset_sock_flow(struct rps_sock_flow_table *table,
                table->ents[hash & table->mask] = RPS_NO_CPU;
 }
 
-extern struct rps_sock_flow_table *rps_sock_flow_table;
+extern struct rps_sock_flow_table __rcu *rps_sock_flow_table;
 
 /* This structure contains an instance of an RX queue. */
 struct netdev_rx_queue {
-       struct rps_map *rps_map;
-       struct rps_dev_flow_table *rps_flow_table;
-       struct kobject kobj;
-       struct netdev_rx_queue *first;
-       atomic_t count;
+       struct rps_map __rcu            *rps_map;
+       struct rps_dev_flow_table __rcu *rps_flow_table;
+       struct kobject                  kobj;
+       struct netdev_rx_queue          *first;
+       atomic_t                        count;
 } ____cacheline_aligned_in_smp;
 #endif /* CONFIG_RPS */
 
@@ -944,7 +944,7 @@ struct net_device {
        /* Protocol specific pointers */
 
 #if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE)
-       struct vlan_group       *vlgrp;         /* VLAN group */
+       struct vlan_group __rcu *vlgrp;         /* VLAN group */
 #endif
 #ifdef CONFIG_NET_DSA
        void                    *dsa_ptr;       /* dsa specific data */
@@ -952,7 +952,7 @@ struct net_device {
        void                    *atalk_ptr;     /* AppleTalk link       */
        struct in_device __rcu  *ip_ptr;        /* IPv4 specific data   */
        void                    *dn_ptr;        /* DECnet specific data */
-       void                    *ip6_ptr;       /* IPv6 specific data */
+       struct inet6_dev __rcu  *ip6_ptr;       /* IPv6 specific data */
        void                    *ec_ptr;        /* Econet specific data */
        void                    *ax25_ptr;      /* AX.25 specific data */
        struct wireless_dev     *ieee80211_ptr; /* IEEE 802.11 specific data,
@@ -1072,7 +1072,7 @@ struct net_device {
                struct pcpu_dstats __percpu     *dstats; /* dummy stats */
        };
        /* GARP */
-       struct garp_port        *garp_port;
+       struct garp_port __rcu  *garp_port;
 
        /* class/net/name entry */
        struct device           dev;
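
The __rcu annotations added to these netdev pointers are sparse-only markers: they generate no code, but they let sparse warn when an RCU-protected pointer is read without rcu_dereference() or published without rcu_assign_pointer(). A minimal sketch of the access pattern the annotation enforces, using a hypothetical structure rather than anything from this patch:

#include <linux/rcupdate.h>
#include <linux/slab.h>

struct demo_cfg {
	int value;
};

static struct demo_cfg __rcu *demo_cfg_ptr;

static int demo_read_value(void)
{
	struct demo_cfg *cfg;
	int val = -1;

	rcu_read_lock();
	cfg = rcu_dereference(demo_cfg_ptr);	/* reader side */
	if (cfg)
		val = cfg->value;
	rcu_read_unlock();
	return val;
}

static int demo_set_value(int value)
{
	struct demo_cfg *cfg = kmalloc(sizeof(*cfg), GFP_KERNEL);

	if (!cfg)
		return -ENOMEM;
	cfg->value = value;
	/* publish; freeing any previous copy via call_rcu() is omitted here */
	rcu_assign_pointer(demo_cfg_ptr, cfg);
	return 0;
}
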
index 018db9a..27ef6b1 100644 (file)
 #define DEFINE_PER_CPU_READ_MOSTLY(type, name)                         \
        DEFINE_PER_CPU_SECTION(type, name, "..readmostly")
 
-/*
- * Declaration/definition used for large per-CPU variables that must be
- * aligned to something larger than the pagesize.
- */
-#define DECLARE_PER_CPU_MULTIPAGE_ALIGNED(type, name, size)            \
-       DECLARE_PER_CPU_SECTION(type, name, "..page_aligned")           \
-       __aligned(size)
-
-#define DEFINE_PER_CPU_MULTIPAGE_ALIGNED(type, name, size)             \
-       DEFINE_PER_CPU_SECTION(type, name, "..page_aligned")            \
-       __aligned(size)
-
 /*
  * Intermodule exports for per-CPU variables.  sparse forgets about
  * address space across EXPORT_SYMBOL(), change EXPORT_SYMBOL() to
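
With the unused multipage-aligned variants removed, the remaining flavours shown in the context above (such as DEFINE_PER_CPU_READ_MOSTLY) cover the common cases. A minimal sketch of declaring and using a per-CPU counter with the 2.6.36-era accessors; the variable and helpers are hypothetical:

#include <linux/percpu.h>

static DEFINE_PER_CPU(unsigned long, demo_hits);

static void demo_count_hit(void)
{
	/* bump this CPU's copy without any locking */
	this_cpu_inc(demo_hits);
}

static unsigned long demo_total_hits(void)
{
	unsigned long sum = 0;
	int cpu;

	for_each_possible_cpu(cpu)
		sum += per_cpu(demo_hits, cpu);
	return sum;
}
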
index a6e047a..7da5fa8 100644 (file)
@@ -472,11 +472,7 @@ static inline int phy_write(struct phy_device *phydev, u32 regnum, u16 val)
 int get_phy_id(struct mii_bus *bus, int addr, u32 *phy_id);
 struct phy_device* get_phy_device(struct mii_bus *bus, int addr);
 int phy_device_register(struct phy_device *phy);
-int phy_clear_interrupt(struct phy_device *phydev);
-int phy_config_interrupt(struct phy_device *phydev, u32 interrupts);
 int phy_init_hw(struct phy_device *phydev);
-int phy_attach_direct(struct net_device *dev, struct phy_device *phydev,
-               u32 flags, phy_interface_t interface);
 struct phy_device * phy_attach(struct net_device *dev,
                const char *bus_id, u32 flags, phy_interface_t interface);
 struct phy_device *phy_find_first(struct mii_bus *bus);
@@ -492,17 +488,12 @@ void phy_start(struct phy_device *phydev);
 void phy_stop(struct phy_device *phydev);
 int phy_start_aneg(struct phy_device *phydev);
 
-void phy_sanitize_settings(struct phy_device *phydev);
 int phy_stop_interrupts(struct phy_device *phydev);
-int phy_enable_interrupts(struct phy_device *phydev);
-int phy_disable_interrupts(struct phy_device *phydev);
 
 static inline int phy_read_status(struct phy_device *phydev) {
        return phydev->drv->read_status(phydev);
 }
 
-int genphy_config_advert(struct phy_device *phydev);
-int genphy_setup_forced(struct phy_device *phydev);
 int genphy_restart_aneg(struct phy_device *phydev);
 int genphy_config_aneg(struct phy_device *phydev);
 int genphy_update_link(struct phy_device *phydev);
@@ -511,8 +502,6 @@ int genphy_suspend(struct phy_device *phydev);
 int genphy_resume(struct phy_device *phydev);
 void phy_driver_unregister(struct phy_driver *drv);
 int phy_driver_register(struct phy_driver *new_driver);
-void phy_prepare_link(struct phy_device *phydev,
-               void (*adjust_link)(struct net_device *));
 void phy_state_machine(struct work_struct *work);
 void phy_start_machine(struct phy_device *phydev,
                void (*handler)(struct net_device *));
@@ -523,7 +512,6 @@ int phy_mii_ioctl(struct phy_device *phydev,
                struct ifreq *ifr, int cmd);
 int phy_start_interrupts(struct phy_device *phydev);
 void phy_print_status(struct phy_device *phydev);
-struct phy_device* phy_device_create(struct mii_bus *bus, int addr, int phy_id);
 void phy_device_free(struct phy_device *phydev);
 
 int phy_register_fixup(const char *bus_id, u32 phy_uid, u32 phy_uid_mask,
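
After this cleanup the helpers removed above (phy_attach_direct(), phy_prepare_link(), genphy_config_advert(), ...) are internal to phylib, and MAC drivers are expected to go through the exported entry points that remain. A minimal sketch of the usual binding sequence, assuming the 2.6.36-era phy_connect() signature; the driver-private structure, bus id and link-change handler are hypothetical:

#include <linux/err.h>
#include <linux/netdevice.h>
#include <linux/phy.h>

struct demo_priv {
	struct phy_device *phydev;
};

static void demo_adjust_link(struct net_device *dev)
{
	struct demo_priv *priv = netdev_priv(dev);

	if (priv->phydev->link)
		netdev_info(dev, "link up, %d Mb/s\n", priv->phydev->speed);
	else
		netdev_info(dev, "link down\n");
}

static int demo_attach_phy(struct net_device *dev, const char *bus_id)
{
	struct demo_priv *priv = netdev_priv(dev);
	struct phy_device *phydev;

	phydev = phy_connect(dev, bus_id, demo_adjust_link,
			     0 /* flags */, PHY_INTERFACE_MODE_RGMII);
	if (IS_ERR(phydev))
		return PTR_ERR(phydev);

	priv->phydev = phydev;
	phy_start(phydev);	/* start the phylib state machine */
	return 0;
}
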
index 600cc1f..56e76af 100644 (file)
@@ -73,6 +73,8 @@ extern void poll_initwait(struct poll_wqueues *pwq);
 extern void poll_freewait(struct poll_wqueues *pwq);
 extern int poll_schedule_timeout(struct poll_wqueues *pwq, int state,
                                 ktime_t *expires, unsigned long slack);
+extern long select_estimate_accuracy(struct timespec *tv);
+
 
 static inline int poll_schedule(struct poll_wqueues *pwq, int state)
 {
index 4272521..092a04f 100644 (file)
 #include <linux/sched.h>               /* For struct task_struct.  */
 
 
-extern long arch_ptrace(struct task_struct *child, long request, long addr, long data);
+extern long arch_ptrace(struct task_struct *child, long request,
+                       unsigned long addr, unsigned long data);
 extern int ptrace_traceme(void);
 extern int ptrace_readdata(struct task_struct *tsk, unsigned long src, char __user *dst, int len);
 extern int ptrace_writedata(struct task_struct *tsk, char __user *src, unsigned long dst, int len);
@@ -108,7 +109,8 @@ extern int ptrace_attach(struct task_struct *tsk);
 extern int ptrace_detach(struct task_struct *, unsigned int);
 extern void ptrace_disable(struct task_struct *);
 extern int ptrace_check_attach(struct task_struct *task, int kill);
-extern int ptrace_request(struct task_struct *child, long request, long addr, long data);
+extern int ptrace_request(struct task_struct *child, long request,
+                         unsigned long addr, unsigned long data);
 extern void ptrace_notify(int exit_code);
 extern void __ptrace_link(struct task_struct *child,
                          struct task_struct *new_parent);
@@ -132,8 +134,10 @@ static inline void ptrace_unlink(struct task_struct *child)
                __ptrace_unlink(child);
 }
 
-int generic_ptrace_peekdata(struct task_struct *tsk, long addr, long data);
-int generic_ptrace_pokedata(struct task_struct *tsk, long addr, long data);
+int generic_ptrace_peekdata(struct task_struct *tsk, unsigned long addr,
+                           unsigned long data);
+int generic_ptrace_pokedata(struct task_struct *tsk, unsigned long addr,
+                           unsigned long data);
 
 /**
  * task_ptrace - return %PT_* flags that apply to a task
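
The prototype changes above propagate unsigned long address/data arguments from sys_ptrace() down into the per-architecture handlers, avoiding sign-extension surprises on 64-bit targets. A simplified sketch of what an arch_ptrace() implementation looks like with the new types; real implementations handle many more requests:

#include <linux/errno.h>
#include <linux/ptrace.h>

long arch_ptrace(struct task_struct *child, long request,
		 unsigned long addr, unsigned long data)
{
	switch (request) {
	case PTRACE_PEEKTEXT:
	case PTRACE_PEEKDATA:
		/* addr and data now match the width of user pointers */
		return generic_ptrace_peekdata(child, addr, data);
	case PTRACE_POKETEXT:
	case PTRACE_POKEDATA:
		return generic_ptrace_pokedata(child, addr, data);
	default:
		/* everything else falls through to the generic handler */
		return ptrace_request(child, request, addr, data);
	}
}
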
diff --git a/include/linux/ramoops.h b/include/linux/ramoops.h
new file mode 100644 (file)
index 0000000..0ae68a2
--- /dev/null
@@ -0,0 +1,15 @@
+#ifndef __RAMOOPS_H
+#define __RAMOOPS_H
+
+/*
+ * Ramoops platform data
+ * @mem_size   memory size for ramoops
+ * @mem_address        physical memory address to contain ramoops
+ */
+
+struct ramoops_platform_data {
+       unsigned long   mem_size;
+       unsigned long   mem_address;
+};
+
+#endif
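
ramoops is a platform driver, so board code is expected to pass the reserved memory window through this new header. A minimal sketch of the registration, assuming the driver binds to a platform device named "ramoops"; the address and size are placeholders for whatever the board reserves:

#include <linux/init.h>
#include <linux/platform_device.h>
#include <linux/ramoops.h>

static struct ramoops_platform_data demo_ramoops_data = {
	.mem_size    = 1024 * 1024,	/* 1 MiB persistent area */
	.mem_address = 0x8f000000,	/* physical address reserved by the board */
};

static struct platform_device demo_ramoops_dev = {
	.name = "ramoops",
	.dev  = {
		.platform_data = &demo_ramoops_data,
	},
};

static int __init demo_ramoops_register(void)
{
	return platform_device_register(&demo_ramoops_dev);
}
device_initcall(demo_ramoops_register);
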
index 25b4f68..8d3a248 100644 (file)
@@ -62,18 +62,6 @@ enum ring_buffer_type {
 unsigned ring_buffer_event_length(struct ring_buffer_event *event);
 void *ring_buffer_event_data(struct ring_buffer_event *event);
 
-/**
- * ring_buffer_event_time_delta - return the delta timestamp of the event
- * @event: the event to get the delta timestamp of
- *
- * The delta timestamp is the 27 bit timestamp since the last event.
- */
-static inline unsigned
-ring_buffer_event_time_delta(struct ring_buffer_event *event)
-{
-       return event->time_delta;
-}
-
 /*
  * ring_buffer_discard_commit will remove an event that has not
  *   ben committed yet. If this is used, then ring_buffer_unlock_commit
index bd6eb0e..0bed941 100644 (file)
@@ -67,6 +67,7 @@
 #define RIO_PW_MSG_SIZE                64
 
 extern struct bus_type rio_bus_type;
+extern struct device rio_bus;
 extern struct list_head rio_devices;   /* list of all devices */
 
 struct rio_mport;
@@ -98,6 +99,7 @@ union rio_pw_msg;
  * @riores: RIO resources this device owns
  * @pwcback: port-write callback function for this device
  * @destid: Network destination ID
+ * @prev: Previous RIO device connected to the current one
  */
 struct rio_dev {
        struct list_head global_list;   /* node in list of all RIO devices */
@@ -111,7 +113,7 @@ struct rio_dev {
        u16 asm_rev;
        u16 efptr;
        u32 pef;
-       u32 swpinfo;            /* Only used for switches */
+       u32 swpinfo;
        u32 src_ops;
        u32 dst_ops;
        u32 comp_tag;
@@ -124,6 +126,7 @@ struct rio_dev {
        struct resource riores[RIO_MAX_DEV_RESOURCES];
        int (*pwcback) (struct rio_dev *rdev, union rio_pw_msg *msg, int step);
        u16 destid;
+       struct rio_dev *prev;
 };
 
 #define rio_dev_g(n) list_entry(n, struct rio_dev, global_list)
@@ -174,6 +177,7 @@ enum rio_phy_type {
  * @index: Port index, unique among all port interfaces of the same type
  * @sys_size: RapidIO common transport system size
  * @phy_type: RapidIO phy type
+ * @phys_efptr: RIO port extended features pointer
  * @name: Port name string
  * @priv: Master port private data
  */
@@ -195,6 +199,7 @@ struct rio_mport {
                                 * 1 - Large size, 65536 devices.
                                 */
        enum rio_phy_type phy_type;     /* RapidIO phy type */
+       u32 phys_efptr;
        unsigned char name[40];
        void *priv;             /* Master port private data */
 };
@@ -215,9 +220,14 @@ struct rio_net {
        unsigned char id;       /* RIO network ID */
 };
 
+/* Definitions used by switch sysfs initialization callback */
+#define RIO_SW_SYSFS_CREATE    1       /* Create switch attributes */
+#define RIO_SW_SYSFS_REMOVE    0       /* Remove switch attributes */
+
 /**
  * struct rio_switch - RIO switch info
  * @node: Node in global list of switches
+ * @rdev: Associated RIO device structure
  * @switchid: Switch ID that is unique across a network
  * @hopcount: Hopcount to this switch
  * @destid: Associated destid in the path
@@ -230,9 +240,12 @@ struct rio_net {
  * @get_domain: Callback for switch-specific domain get function
  * @em_init: Callback for switch-specific error management initialization function
  * @em_handle: Callback for switch-specific error management handler function
+ * @sw_sysfs: Callback that initializes switch-specific sysfs attributes
+ * @nextdev: Array of per-port pointers to the next attached device
  */
 struct rio_switch {
        struct list_head node;
+       struct rio_dev *rdev;
        u16 switchid;
        u16 hopcount;
        u16 destid;
@@ -250,6 +263,8 @@ struct rio_switch {
                           u8 *sw_domain);
        int (*em_init) (struct rio_dev *dev);
        int (*em_handle) (struct rio_dev *dev, u8 swport);
+       int (*sw_sysfs) (struct rio_dev *dev, int create);
+       struct rio_dev *nextdev[0];
 };
 
 /* Low-level architecture-dependent routines */
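
The new sw_sysfs hook lets a switch-specific driver create its own attributes on the switch's RIO device during enumeration and drop them again on removal, keyed on the RIO_SW_SYSFS_CREATE/RIO_SW_SYSFS_REMOVE values defined above. A minimal sketch of such a callback with one hypothetical read-only attribute, not taken from any in-tree switch driver:

#include <linux/device.h>
#include <linux/rio.h>

static ssize_t demo_swinfo_show(struct device *dev,
				struct device_attribute *attr, char *buf)
{
	struct rio_dev *rdev = to_rio_dev(dev);

	return sprintf(buf, "switch device id 0x%04x\n", rdev->did);
}
static DEVICE_ATTR(swinfo, S_IRUGO, demo_swinfo_show, NULL);

static int demo_sw_sysfs(struct rio_dev *rdev, int create)
{
	if (create == RIO_SW_SYSFS_CREATE)
		return device_create_file(&rdev->dev, &dev_attr_swinfo);

	device_remove_file(&rdev->dev, &dev_attr_swinfo);
	return 0;
}
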
index db50e1c..ee7b6ad 100644 (file)
@@ -34,5 +34,7 @@
 #define RIO_DID_IDTCPS16               0x035b
 #define RIO_DID_IDTCPS6Q               0x035f
 #define RIO_DID_IDTCPS10Q              0x035e
+#define RIO_DID_IDTCPS1848             0x0374
+#define RIO_DID_IDTCPS1616             0x0379
 
 #endif                         /* LINUX_RIO_IDS_H */
index aedee04..d63dcba 100644 (file)
@@ -33,6 +33,7 @@
 #define  RIO_PEF_MEMORY                        0x40000000      /* [I] MMIO */
 #define  RIO_PEF_PROCESSOR             0x20000000      /* [I] Processor */
 #define  RIO_PEF_SWITCH                        0x10000000      /* [I] Switch */
+#define  RIO_PEF_MULTIPORT             0x08000000      /* [VI, 2.1] Multiport */
 #define  RIO_PEF_INB_MBOX              0x00f00000      /* [II] Mailboxes */
 #define  RIO_PEF_INB_MBOX0             0x00800000      /* [II] Mailbox 0 */
 #define  RIO_PEF_INB_MBOX1             0x00400000      /* [II] Mailbox 1 */
@@ -51,6 +52,7 @@
 #define  RIO_SWP_INFO_PORT_TOTAL_MASK  0x0000ff00      /* [I] Total number of ports */
 #define  RIO_SWP_INFO_PORT_NUM_MASK    0x000000ff      /* [I] Maintenance transaction port number */
 #define  RIO_GET_TOTAL_PORTS(x)                ((x & RIO_SWP_INFO_PORT_TOTAL_MASK) >> 8)
+#define  RIO_GET_PORT_NUM(x)           (x & RIO_SWP_INFO_PORT_NUM_MASK)
 
 #define RIO_SRC_OPS_CAR                0x18    /* [I] Source Operations CAR */
 #define  RIO_SRC_OPS_READ              0x00008000      /* [I] Read op */
 #define RIO_COMPONENT_TAG_CSR  0x6c    /* [III] Component Tag CSR */
 
 #define RIO_STD_RTE_CONF_DESTID_SEL_CSR        0x70
+#define  RIO_STD_RTE_CONF_EXTCFGEN             0x80000000
 #define RIO_STD_RTE_CONF_PORT_SEL_CSR  0x74
 #define RIO_STD_RTE_DEFAULT_PORT       0x78
 
 #define  RIO_PORT_GEN_MASTER           0x40000000
 #define  RIO_PORT_GEN_DISCOVERED       0x20000000
 #define RIO_PORT_N_MNT_REQ_CSR(x)      (0x0040 + x*0x20)       /* 0x0002 */
+#define  RIO_MNT_REQ_CMD_RD            0x03    /* Reset-device command */
+#define  RIO_MNT_REQ_CMD_IS            0x04    /* Input-status command */
 #define RIO_PORT_N_MNT_RSP_CSR(x)      (0x0044 + x*0x20)       /* 0x0002 */
 #define  RIO_PORT_N_MNT_RSP_RVAL       0x80000000 /* Response Valid */
-#define  RIO_PORT_N_MNT_RSP_ASTAT      0x000003e0 /* ackID Status */
+#define  RIO_PORT_N_MNT_RSP_ASTAT      0x000007e0 /* ackID Status */
 #define  RIO_PORT_N_MNT_RSP_LSTAT      0x0000001f /* Link Status */
 #define RIO_PORT_N_ACK_STS_CSR(x)      (0x0048 + x*0x20)       /* 0x0002 */
 #define  RIO_PORT_N_ACK_CLEAR          0x80000000
-#define  RIO_PORT_N_ACK_INBOUND                0x1f000000
-#define  RIO_PORT_N_ACK_OUTSTAND       0x00001f00
-#define  RIO_PORT_N_ACK_OUTBOUND       0x0000001f
+#define  RIO_PORT_N_ACK_INBOUND                0x3f000000
+#define  RIO_PORT_N_ACK_OUTSTAND       0x00003f00
+#define  RIO_PORT_N_ACK_OUTBOUND       0x0000003f
 #define RIO_PORT_N_ERR_STS_CSR(x)      (0x0058 + x*0x20)
 #define  RIO_PORT_N_ERR_STS_PW_OUT_ES  0x00010000 /* Output Error-stopped */
 #define  RIO_PORT_N_ERR_STS_PW_INP_ES  0x00000100 /* Input Error-stopped */
 #define  RIO_PORT_N_ERR_STS_PORT_ERR   0x00000004
 #define  RIO_PORT_N_ERR_STS_PORT_OK    0x00000002
 #define  RIO_PORT_N_ERR_STS_PORT_UNINIT        0x00000001
-#define  RIO_PORT_N_ERR_STS_CLR_MASK   0x07120204
 #define RIO_PORT_N_CTL_CSR(x)          (0x005c + x*0x20)
 #define  RIO_PORT_N_CTL_PWIDTH         0xc0000000
 #define  RIO_PORT_N_CTL_PWIDTH_1       0x00000000
 #define RIO_EM_EFB_HEADER      0x000   /* Error Management Extensions Block Header */
 #define RIO_EM_LTL_ERR_DETECT  0x008   /* Logical/Transport Layer Error Detect CSR */
 #define RIO_EM_LTL_ERR_EN      0x00c   /* Logical/Transport Layer Error Enable CSR */
+#define  REM_LTL_ERR_ILLTRAN           0x08000000 /* Illegal Transaction decode */
+#define  REM_LTL_ERR_UNSOLR            0x00800000 /* Unsolicited Response */
+#define  REM_LTL_ERR_UNSUPTR           0x00400000 /* Unsupported Transaction */
+#define  REM_LTL_ERR_IMPSPEC           0x000000ff /* Implementation Specific */
 #define RIO_EM_LTL_HIADDR_CAP  0x010   /* Logical/Transport Layer High Address Capture CSR */
 #define RIO_EM_LTL_ADDR_CAP    0x014   /* Logical/Transport Layer Address Capture CSR */
 #define RIO_EM_LTL_DEVID_CAP   0x018   /* Logical/Transport Layer Device ID Capture CSR */
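
With RIO_GET_PORT_NUM() added next to the existing RIO_GET_TOTAL_PORTS(), the switch-port information word can be decoded without open-coded shifts and masks. A small illustrative helper; the function name is hypothetical:

#include <linux/kernel.h>
#include <linux/rio.h>
#include <linux/rio_regs.h>

/* Report how many ports the switch has and which one we talk through. */
static void demo_report_swport(struct rio_dev *rdev)
{
	u32 swpinfo = rdev->swpinfo;

	pr_debug("rio: %d ports, maintenance port %d\n",
		 RIO_GET_TOTAL_PORTS(swpinfo),
		 RIO_GET_PORT_NUM(swpinfo));
}
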
index 393ce94..be7adb7 100644 (file)
@@ -626,6 +626,10 @@ struct signal_struct {
 
        int oom_adj;            /* OOM kill score adjustment (bit shift) */
        int oom_score_adj;      /* OOM kill score adjustment */
+
+       struct mutex cred_guard_mutex;  /* guard against foreign influences on
+                                        * credential calculations
+                                        * (notably, ptrace) */
 };
 
 /* Context switch must be unlocked if interrupts are to be enabled */
@@ -1305,9 +1309,6 @@ struct task_struct {
                                         * credentials (COW) */
        const struct cred __rcu *cred;  /* effective (overridable) subjective task
                                         * credentials (COW) */
-       struct mutex cred_guard_mutex;  /* guard against foreign influences on
-                                        * credential calculations
-                                        * (notably. ptrace) */
        struct cred *replacement_session_keyring; /* for KEYCTL_SESSION_TO_PARENT */
 
        char comm[TASK_COMM_LEN]; /* executable name excluding path
@@ -2236,9 +2237,16 @@ static inline void task_unlock(struct task_struct *p)
        spin_unlock(&p->alloc_lock);
 }
 
-extern struct sighand_struct *lock_task_sighand(struct task_struct *tsk,
+extern struct sighand_struct *__lock_task_sighand(struct task_struct *tsk,
                                                        unsigned long *flags);
 
+#define lock_task_sighand(tsk, flags)                                  \
+({     struct sighand_struct *__ss;                                    \
+       __cond_lock(&(tsk)->sighand->siglock,                           \
+                   (__ss = __lock_task_sighand(tsk, flags)));          \
+       __ss;                                                           \
+})                                                                     \
+
 static inline void unlock_task_sighand(struct task_struct *tsk,
                                                unsigned long *flags)
 {
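
The wrapper above keeps the existing lock_task_sighand() calling convention while teaching sparse, via __cond_lock(), that ->siglock is held only when a non-NULL sighand is returned. Callers do not change; a minimal sketch of the expected pattern:

#include <linux/sched.h>

static void demo_touch_sighand(struct task_struct *task)
{
	struct sighand_struct *sighand;
	unsigned long flags;

	sighand = lock_task_sighand(task, &flags);
	if (!sighand)
		return;		/* task has no sighand left to lock */

	/* ... inspect or modify signal state under ->siglock ... */

	unlock_task_sighand(task, &flags);
}
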
diff --git a/include/linux/spi/74x164.h b/include/linux/spi/74x164.h
new file mode 100644 (file)
index 0000000..d85c52f
--- /dev/null
@@ -0,0 +1,11 @@
+#ifndef LINUX_SPI_74X164_H
+#define LINUX_SPI_74X164_H
+
+#define GEN_74X164_DRIVER_NAME "74x164"
+
+struct gen_74x164_chip_platform_data {
+       /* number assigned to the first GPIO */
+       unsigned        base;
+};
+
+#endif
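
The new header only carries the GPIO base for the 74x164 shift-register driver; everything else comes through normal SPI board registration. A minimal sketch of how board code would wire it up; bus number, chip select and clock rate are placeholders:

#include <linux/init.h>
#include <linux/spi/spi.h>
#include <linux/spi/74x164.h>

static struct gen_74x164_chip_platform_data demo_74x164_pdata = {
	.base = 200,			/* first GPIO number handed to gpiolib */
};

static struct spi_board_info demo_spi_devices[] __initdata = {
	{
		.modalias	= GEN_74X164_DRIVER_NAME,
		.max_speed_hz	= 1000000,
		.bus_num	= 1,
		.chip_select	= 0,
		.platform_data	= &demo_74x164_pdata,
	},
};

static int __init demo_register_spi(void)
{
	return spi_register_board_info(demo_spi_devices,
				       ARRAY_SIZE(demo_spi_devices));
}
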
index 0ff2779..2e7d81c 100644 (file)
 #define MGSL_MODE_BISYNC       4
 #define MGSL_MODE_RAW          6
 #define MGSL_MODE_BASE_CLOCK    7
+#define MGSL_MODE_XSYNC         8
 
 #define MGSL_BUS_TYPE_ISA      1
 #define MGSL_BUS_TYPE_EISA     2
@@ -290,6 +291,10 @@ struct gpio_desc {
 #define MGSL_IOCSGPIO          _IOW(MGSL_MAGIC_IOC,16,struct gpio_desc)
 #define MGSL_IOCGGPIO          _IOR(MGSL_MAGIC_IOC,17,struct gpio_desc)
 #define MGSL_IOCWAITGPIO       _IOWR(MGSL_MAGIC_IOC,18,struct gpio_desc)
+#define MGSL_IOCSXSYNC         _IO(MGSL_MAGIC_IOC, 19)
+#define MGSL_IOCGXSYNC         _IO(MGSL_MAGIC_IOC, 20)
+#define MGSL_IOCSXCTRL         _IO(MGSL_MAGIC_IOC, 21)
+#define MGSL_IOCGXCTRL         _IO(MGSL_MAGIC_IOC, 22)
 
 #ifdef __KERNEL__
 /* provide 32 bit ioctl compatibility on 64 bit systems */
index e6319d1..cacc27a 100644 (file)
@@ -701,7 +701,8 @@ asmlinkage long sys_nfsservctl(int cmd,
 asmlinkage long sys_syslog(int type, char __user *buf, int len);
 asmlinkage long sys_uselib(const char __user *library);
 asmlinkage long sys_ni_syscall(void);
-asmlinkage long sys_ptrace(long request, long pid, long addr, long data);
+asmlinkage long sys_ptrace(long request, long pid, unsigned long addr,
+                          unsigned long data);
 
 asmlinkage long sys_add_key(const char __user *_type,
                            const char __user *_description,
index 10db010..3a2e66d 100644 (file)
@@ -150,7 +150,7 @@ static inline void tracehook_report_syscall_exit(struct pt_regs *regs, int step)
  *
  * Return %LSM_UNSAFE_* bits applied to an exec because of tracing.
  *
- * @task->cred_guard_mutex is held by the caller through the do_execve().
+ * @task->signal->cred_guard_mutex is held by the caller through the do_execve().
  */
 static inline int tracehook_unsafe_exec(struct task_struct *task)
 {
index 1faa80d..e68b439 100644 (file)
@@ -5,7 +5,6 @@
 #include <linux/types.h>
 #include <linux/virtio_ids.h>
 #include <linux/virtio_config.h>
-#include <linux/types.h>
 
 /* The feature bitmap for virtio 9P */
 
diff --git a/include/net/caif/caif_shm.h b/include/net/caif/caif_shm.h
new file mode 100644 (file)
index 0000000..5bcce55
--- /dev/null
@@ -0,0 +1,26 @@
+/*
+ * Copyright (C) ST-Ericsson AB 2010
+ * Contact: Sjur Brendeland / sjur.brandeland@stericsson.com
+ * Author: Amarnath Revanna / amarnath.bangalore.revanna@stericsson.com
+ * License terms: GNU General Public License (GPL) version 2
+ */
+
+#ifndef CAIF_SHM_H_
+#define CAIF_SHM_H_
+
+struct shmdev_layer {
+       u32 shm_base_addr;
+       u32 shm_total_sz;
+       u32 shm_id;
+       u32 shm_loopback;
+       void *hmbx;
+       int (*pshmdev_mbxsend) (u32 shm_id, u32 mbx_msg);
+       int (*pshmdev_mbxsetup) (void *pshmdrv_cb,
+                               struct shmdev_layer *pshm_dev, void *pshm_drv);
+       struct net_device *pshm_netdev;
+};
+
+extern int caif_shmcore_probe(struct shmdev_layer *pshm_dev);
+extern void caif_shmcore_remove(struct net_device *pshm_netdev);
+
+#endif
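
caif_shmcore_probe() expects platform glue to describe the shared-memory window and supply the mailbox hooks before handing the structure over. A heavily simplified sketch of such glue with the mailbox callbacks stubbed out; every value here is a placeholder, and a real platform would route these callbacks to its IPC mailbox hardware:

#include <linux/init.h>
#include <linux/types.h>
#include <net/caif/caif_shm.h>

static int demo_mbxsend(u32 shm_id, u32 mbx_msg)
{
	/* ring the remote side's mailbox here */
	return 0;
}

static int demo_mbxsetup(void *pshmdrv_cb, struct shmdev_layer *pshm_dev,
			 void *pshm_drv)
{
	/* register pshmdrv_cb as the mailbox interrupt handler here */
	return 0;
}

static struct shmdev_layer demo_shmdev = {
	.shm_base_addr	  = 0x90000000,	/* start of the shared window */
	.shm_total_sz	  = 0x00040000,	/* 256 KiB shared with the modem */
	.shm_id		  = 0,
	.shm_loopback	  = 0,
	.pshmdev_mbxsend  = demo_mbxsend,
	.pshmdev_mbxsetup = demo_mbxsetup,
};

static int __init demo_caif_shm_init(void)
{
	return caif_shmcore_probe(&demo_shmdev);
}
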
index a217c83..ffe9cb7 100644 (file)
@@ -95,7 +95,7 @@ struct dst_entry {
        unsigned long           lastuse;
        union {
                struct dst_entry *next;
-               struct rtable    *rt_next;
+               struct rtable __rcu *rt_next;
                struct rt6_info   *rt6_next;
                struct dn_route  *dn_next;
        };
index 106f309..075f1e3 100644 (file)
@@ -20,7 +20,7 @@ struct fib_rule {
        u32                     table;
        u8                      action;
        u32                     target;
-       struct fib_rule *       ctarget;
+       struct fib_rule __rcu   *ctarget;
        char                    iifname[IFNAMSIZ];
        char                    oifname[IFNAMSIZ];
        struct rcu_head         rcu;
index 825f172..f4c2959 100644 (file)
@@ -107,7 +107,7 @@ struct garp_applicant {
 };
 
 struct garp_port {
-       struct garp_applicant   *applicants[GARP_APPLICATION_MAX + 1];
+       struct garp_applicant __rcu     *applicants[GARP_APPLICATION_MAX + 1];
 };
 
 extern int     garp_register_application(struct garp_application *app);
index 417d0c8..fe239bf 100644 (file)
@@ -15,7 +15,7 @@
 
 struct inet_peer {
        /* group together avl_left,avl_right,v4daddr to speedup lookups */
-       struct inet_peer        *avl_left, *avl_right;
+       struct inet_peer __rcu  *avl_left, *avl_right;
        __be32                  v4daddr;        /* peer's address */
        __u32                   avl_height;
        struct list_head        unused;
index dbee3fe..86e2b18 100644 (file)
@@ -59,7 +59,7 @@ struct ipcm_cookie {
 #define IPCB(skb) ((struct inet_skb_parm*)((skb)->cb))
 
 struct ip_ra_chain {
-       struct ip_ra_chain      *next;
+       struct ip_ra_chain __rcu *next;
        struct sock             *sk;
        union {
                void                    (*destructor)(struct sock *);
@@ -68,7 +68,7 @@ struct ip_ra_chain {
        struct rcu_head         rcu;
 };
 
-extern struct ip_ra_chain *ip_ra_chain;
+extern struct ip_ra_chain __rcu *ip_ra_chain;
 
 /* IP flags. */
 #define IP_CE          0x8000          /* Flag: "Congestion"           */
index fc94ec5..fc73e66 100644 (file)
@@ -13,7 +13,7 @@
 /* IPv6 tunnel */
 
 struct ip6_tnl {
-       struct ip6_tnl *next;   /* next tunnel in list */
+       struct ip6_tnl __rcu *next;     /* next tunnel in list */
        struct net_device *dev; /* virtual device associated with tunnel */
        struct ip6_tnl_parm parms;      /* tunnel configuration parameters */
        struct flowi fl;        /* flowi template for xmit */
index 58abbf9..a32654d 100644 (file)
@@ -16,7 +16,7 @@ struct ip_tunnel_6rd_parm {
 };
 
 struct ip_tunnel {
-       struct ip_tunnel        *next;
+       struct ip_tunnel __rcu  *next;
        struct net_device       *dev;
 
        int                     err_count;      /* Number of arrived ICMP errors */
@@ -34,12 +34,12 @@ struct ip_tunnel {
 #ifdef CONFIG_IPV6_SIT_6RD
        struct ip_tunnel_6rd_parm       ip6rd;
 #endif
-       struct ip_tunnel_prl_entry      *prl;           /* potential router list */
+       struct ip_tunnel_prl_entry __rcu *prl;          /* potential router list */
        unsigned int                    prl_count;      /* # of entries in PRL */
 };
 
 struct ip_tunnel_prl_entry {
-       struct ip_tunnel_prl_entry      *next;
+       struct ip_tunnel_prl_entry __rcu *next;
        __be32                          addr;
        u16                             flags;
        struct rcu_head                 rcu_head;
index 65af9a0..1bf812b 100644 (file)
@@ -88,7 +88,7 @@ struct net {
 #ifdef CONFIG_WEXT_CORE
        struct sk_buff_head     wext_nlevents;
 #endif
-       struct net_generic      *gen;
+       struct net_generic __rcu        *gen;
 
        /* Note : following structs are cache line aligned */
 #ifdef CONFIG_XFRM
index f1effdd..dc07495 100644 (file)
@@ -89,10 +89,10 @@ struct inet_protosw {
 #define INET_PROTOSW_PERMANENT 0x02  /* Permanent protocols are unremovable. */
 #define INET_PROTOSW_ICSK      0x04  /* Is this an inet_connection_sock? */
 
-extern const struct net_protocol *inet_protos[MAX_INET_PROTOS];
+extern const struct net_protocol __rcu *inet_protos[MAX_INET_PROTOS];
 
 #if defined(CONFIG_IPV6) || defined (CONFIG_IPV6_MODULE)
-extern const struct inet6_protocol *inet6_protos[MAX_INET_PROTOS];
+extern const struct inet6_protocol __rcu *inet6_protos[MAX_INET_PROTOS];
 #endif
 
 extern int     inet_add_protocol(const struct net_protocol *prot, unsigned char num);
index 73a4f97..c7a7362 100644 (file)
@@ -301,7 +301,7 @@ struct sock {
        const struct cred       *sk_peer_cred;
        long                    sk_rcvtimeo;
        long                    sk_sndtimeo;
-       struct sk_filter        *sk_filter;
+       struct sk_filter __rcu  *sk_filter;
        void                    *sk_protinfo;
        struct timer_list       sk_timer;
        ktime_t                 sk_stamp;
index f28d7c9..bcfb6b2 100644 (file)
@@ -1264,7 +1264,7 @@ struct xfrm_tunnel {
        int (*handler)(struct sk_buff *skb);
        int (*err_handler)(struct sk_buff *skb, u32 info);
 
-       struct xfrm_tunnel *next;
+       struct xfrm_tunnel __rcu *next;
        int priority;
 };
 
@@ -1272,7 +1272,7 @@ struct xfrm6_tunnel {
        int (*handler)(struct sk_buff *skb);
        int (*err_handler)(struct sk_buff *skb, struct inet6_skb_parm *opt,
                           u8 type, u8 code, int offset, __be32 info);
-       struct xfrm6_tunnel *next;
+       struct xfrm6_tunnel __rcu *next;
        int priority;
 };
 
index 6fa7cba..1c09820 100644 (file)
@@ -86,76 +86,62 @@ TRACE_EVENT(irq_handler_exit,
 
 DECLARE_EVENT_CLASS(softirq,
 
-       TP_PROTO(struct softirq_action *h, struct softirq_action *vec),
+       TP_PROTO(unsigned int vec_nr),
 
-       TP_ARGS(h, vec),
+       TP_ARGS(vec_nr),
 
        TP_STRUCT__entry(
-               __field(        int,    vec                     )
+               __field(        unsigned int,   vec     )
        ),
 
        TP_fast_assign(
-               if (vec)
-                       __entry->vec = (int)(h - vec);
-               else
-                       __entry->vec = (int)(long)h;
+               __entry->vec = vec_nr;
        ),
 
-       TP_printk("vec=%d [action=%s]", __entry->vec,
+       TP_printk("vec=%u [action=%s]", __entry->vec,
                  show_softirq_name(__entry->vec))
 );
 
 /**
  * softirq_entry - called immediately before the softirq handler
- * @h: pointer to struct softirq_action
- * @vec: pointer to first struct softirq_action in softirq_vec array
+ * @vec_nr:  softirq vector number
  *
- * The @h parameter, contains a pointer to the struct softirq_action
- * which has a pointer to the action handler that is called. By subtracting
- * the @vec pointer from the @h pointer, we can determine the softirq
- * number. Also, when used in combination with the softirq_exit tracepoint
- * we can determine the softirq latency.
+ * When used in combination with the softirq_exit tracepoint
+ * we can determine the softirq handler runtime.
  */
 DEFINE_EVENT(softirq, softirq_entry,
 
-       TP_PROTO(struct softirq_action *h, struct softirq_action *vec),
+       TP_PROTO(unsigned int vec_nr),
 
-       TP_ARGS(h, vec)
+       TP_ARGS(vec_nr)
 );
 
 /**
  * softirq_exit - called immediately after the softirq handler returns
- * @h: pointer to struct softirq_action
- * @vec: pointer to first struct softirq_action in softirq_vec array
+ * @vec_nr:  softirq vector number
  *
- * The @h parameter contains a pointer to the struct softirq_action
- * that has handled the softirq. By subtracting the @vec pointer from
- * the @h pointer, we can determine the softirq number. Also, when used in
- * combination with the softirq_entry tracepoint we can determine the softirq
- * latency.
+ * When used in combination with the softirq_entry tracepoint
+ * we can determine the softirq handler runtime.
  */
 DEFINE_EVENT(softirq, softirq_exit,
 
-       TP_PROTO(struct softirq_action *h, struct softirq_action *vec),
+       TP_PROTO(unsigned int vec_nr),
 
-       TP_ARGS(h, vec)
+       TP_ARGS(vec_nr)
 );
 
 /**
  * softirq_raise - called immediately when a softirq is raised
- * @h: pointer to struct softirq_action
- * @vec: pointer to first struct softirq_action in softirq_vec array
+ * @vec_nr:  softirq vector number
  *
- * The @h parameter contains a pointer to the softirq vector number which is
- * raised. @vec is NULL and it means @h includes vector number not
- * softirq_action. When used in combination with the softirq_entry tracepoint
- * we can determine the softirq raise latency.
+ * When used in combination with the softirq_entry tracepoint
+ * we can determine the softirq raise-to-run latency.
  */
 DEFINE_EVENT(softirq, softirq_raise,
 
-       TP_PROTO(struct softirq_action *h, struct softirq_action *vec),
+       TP_PROTO(unsigned int vec_nr),
 
-       TP_ARGS(h, vec)
+       TP_ARGS(vec_nr)
 );
 
 #endif /*  _TRACE_IRQ_H */
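
With the event class keyed on the vector number alone, callers simply pass vec_nr instead of the softirq_action/vector-base pointer pair. A minimal sketch of how the entry/exit pair brackets a handler, simplified from what the softirq loop does:

#include <linux/interrupt.h>
#include <trace/events/irq.h>

static void demo_run_softirq(struct softirq_action *h, unsigned int vec_nr)
{
	trace_softirq_entry(vec_nr);
	h->action(h);			/* run the handler itself */
	trace_softirq_exit(vec_nr);
}
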
index 3ae8ffe..88c1046 100644 (file)
@@ -518,7 +518,6 @@ if CGROUPS
 
 config CGROUP_DEBUG
        bool "Example debug cgroup subsystem"
-       depends on CGROUPS
        default n
        help
          This option enables a simple cgroup subsystem that
@@ -529,7 +528,6 @@ config CGROUP_DEBUG
 
 config CGROUP_NS
        bool "Namespace cgroup subsystem"
-       depends on CGROUPS
        help
          Provides a simple namespace cgroup subsystem to
          provide hierarchical naming of sets of namespaces,
@@ -538,21 +536,18 @@ config CGROUP_NS
 
 config CGROUP_FREEZER
        bool "Freezer cgroup subsystem"
-       depends on CGROUPS
        help
          Provides a way to freeze and unfreeze all tasks in a
          cgroup.
 
 config CGROUP_DEVICE
        bool "Device controller for cgroups"
-       depends on CGROUPS && EXPERIMENTAL
        help
          Provides a cgroup implementing whitelists for devices which
          a process in the cgroup can mknod or open.
 
 config CPUSETS
        bool "Cpuset support"
-       depends on CGROUPS
        help
          This option will let you create and manage CPUSETs which
          allow dynamically partitioning a system into sets of CPUs and
@@ -568,7 +563,6 @@ config PROC_PID_CPUSET
 
 config CGROUP_CPUACCT
        bool "Simple CPU accounting cgroup subsystem"
-       depends on CGROUPS
        help
          Provides a simple Resource Controller for monitoring the
          total CPU consumed by the tasks in a cgroup.
@@ -578,11 +572,10 @@ config RESOURCE_COUNTERS
        help
          This option enables controller independent resource accounting
          infrastructure that works with cgroups.
-       depends on CGROUPS
 
 config CGROUP_MEM_RES_CTLR
        bool "Memory Resource Controller for Control Groups"
-       depends on CGROUPS && RESOURCE_COUNTERS
+       depends on RESOURCE_COUNTERS
        select MM_OWNER
        help
          Provides a memory resource controller that manages both anonymous
@@ -623,7 +616,7 @@ config CGROUP_MEM_RES_CTLR_SWAP
 
 menuconfig CGROUP_SCHED
        bool "Group CPU scheduler"
-       depends on EXPERIMENTAL && CGROUPS
+       depends on EXPERIMENTAL
        default n
        help
          This feature lets CPU scheduler recognize task groups and control CPU
@@ -652,7 +645,7 @@ endif #CGROUP_SCHED
 
 config BLK_CGROUP
        tristate "Block IO controller"
-       depends on CGROUPS && BLOCK
+       depends on BLOCK
        default n
        ---help---
        Generic block IO controller cgroup interface. This is the common
@@ -682,6 +675,59 @@ config DEBUG_BLK_CGROUP
 
 endif # CGROUPS
 
+menuconfig NAMESPACES
+       bool "Namespaces support" if EMBEDDED
+       default !EMBEDDED
+       help
+         Provides the way to make tasks work with different objects using
+         the same id. For example same IPC id may refer to different objects
+         or same user id or pid may refer to different tasks when used in
+         different namespaces.
+
+if NAMESPACES
+
+config UTS_NS
+       bool "UTS namespace"
+       default y
+       help
+         In this namespace tasks see different info provided with the
+         uname() system call
+
+config IPC_NS
+       bool "IPC namespace"
+       depends on (SYSVIPC || POSIX_MQUEUE)
+       default y
+       help
+         In this namespace tasks work with IPC ids which correspond to
+         different IPC objects in different namespaces.
+
+config USER_NS
+       bool "User namespace (EXPERIMENTAL)"
+       depends on EXPERIMENTAL
+       default y
+       help
+         This allows containers, i.e. vservers, to use user namespaces
+         to provide different user info for different servers.
+         If unsure, say N.
+
+config PID_NS
+       bool "PID Namespaces"
+       default y
+       help
+         Support process id namespaces.  This allows having multiple
+         processes with the same pid as long as they are in different
+         pid namespaces.  This is a building block of containers.
+
+config NET_NS
+       bool "Network namespace"
+       depends on NET
+       default y
+       help
+         Allow user space to create what appear to be multiple instances
+         of the network stack.
+
+endif # NAMESPACES
+
 config MM_OWNER
        bool
 
@@ -734,57 +780,6 @@ config RELAY
 
          If unsure, say N.
 
-config NAMESPACES
-       bool "Namespaces support" if EMBEDDED
-       default !EMBEDDED
-       help
-         Provides the way to make tasks work with different objects using
-         the same id. For example same IPC id may refer to different objects
-         or same user id or pid may refer to different tasks when used in
-         different namespaces.
-
-config UTS_NS
-       bool "UTS namespace"
-       depends on NAMESPACES
-       help
-         In this namespace tasks see different info provided with the
-         uname() system call
-
-config IPC_NS
-       bool "IPC namespace"
-       depends on NAMESPACES && (SYSVIPC || POSIX_MQUEUE)
-       help
-         In this namespace tasks work with IPC ids which correspond to
-         different IPC objects in different namespaces.
-
-config USER_NS
-       bool "User namespace (EXPERIMENTAL)"
-       depends on NAMESPACES && EXPERIMENTAL
-       help
-         This allows containers, i.e. vservers, to use user namespaces
-         to provide different user info for different servers.
-         If unsure, say N.
-
-config PID_NS
-       bool "PID Namespaces (EXPERIMENTAL)"
-       default n
-       depends on NAMESPACES && EXPERIMENTAL
-       help
-         Support process id namespaces.  This allows having multiple
-         processes with the same pid as long as they are in different
-         pid namespaces.  This is a building block of containers.
-
-         Unless you want to work with an experimental feature
-         say N here.
-
-config NET_NS
-       bool "Network namespace"
-       default n
-       depends on NAMESPACES && EXPERIMENTAL && NET
-       help
-         Allow user space to create what appear to be multiple instances
-         of the network stack.
-
 config BLK_DEV_INITRD
        bool "Initial RAM filesystem and RAM disk (initramfs/initrd) support"
        depends on BROKEN || !FRV
index 9dc2c7d..845a287 100644 (file)
@@ -241,6 +241,8 @@ long compat_sys_semctl(int first, int second, int third, void __user *uptr)
        struct semid64_ds __user *up64;
        int version = compat_ipc_parse_version(&third);
 
+       memset(&s64, 0, sizeof(s64));
+
        if (!uptr)
                return -EINVAL;
        if (get_user(pad, (u32 __user *) uptr))
@@ -421,6 +423,8 @@ long compat_sys_msgctl(int first, int second, void __user *uptr)
        int version = compat_ipc_parse_version(&second);
        void __user *p;
 
+       memset(&m64, 0, sizeof(m64));
+
        switch (second & (~IPC_64)) {
        case IPC_INFO:
        case IPC_RMID:
@@ -594,6 +598,8 @@ long compat_sys_shmctl(int first, int second, void __user *uptr)
        int err, err2;
        int version = compat_ipc_parse_version(&second);
 
+       memset(&s64, 0, sizeof(s64));
+
        switch (second & (~IPC_64)) {
        case IPC_RMID:
        case SHM_LOCK:
index d8d1e9f..380ea4f 100644 (file)
@@ -53,6 +53,9 @@ asmlinkage long compat_sys_mq_open(const char __user *u_name,
        void __user *p = NULL;
        if (u_attr && oflag & O_CREAT) {
                struct mq_attr attr;
+
+               memset(&attr, 0, sizeof(attr));
+
                p = compat_alloc_user_space(sizeof(attr));
                if (get_compat_mq_attr(&attr, u_attr) ||
                    copy_to_user(p, &attr, sizeof(attr)))
@@ -127,6 +130,8 @@ asmlinkage long compat_sys_mq_getsetattr(mqd_t mqdes,
        struct mq_attr __user *p = compat_alloc_user_space(2 * sizeof(*p));
        long ret;
 
+       memset(&mqstat, 0, sizeof(mqstat));
+
        if (u_mqstat) {
                if (get_compat_mq_attr(&mqstat, u_mqstat) ||
                    copy_to_user(p, &mqstat, sizeof(mqstat)))
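
The memset() calls added in these compat handlers guarantee that the on-stack structures are fully initialized before any later copy_to_user(), so padding and fields that a particular path never fills cannot leak kernel stack contents. The pattern in isolation, with a hypothetical structure and handler:

#include <linux/errno.h>
#include <linux/string.h>
#include <linux/uaccess.h>

struct demo_stat {
	unsigned long used;
	unsigned long free;
	/* plus padding / fields not written on every path */
};

static long demo_get_stat(struct demo_stat __user *ubuf)
{
	struct demo_stat s;

	memset(&s, 0, sizeof(s));	/* zero everything first */
	s.used = 1;			/* only some fields get real values */

	if (copy_to_user(ubuf, &s, sizeof(s)))
		return -EFAULT;
	return 0;
}
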
index 7bc46a9..fd658a1 100644 (file)
--- a/ipc/shm.c
+++ b/ipc/shm.c
@@ -108,7 +108,11 @@ void __init shm_init (void)
 {
        shm_init_ns(&init_ipc_ns);
        ipc_init_proc_interface("sysvipc/shm",
-                               "       key      shmid perms       size  cpid  lpid nattch   uid   gid  cuid  cgid      atime      dtime      ctime\n",
+#if BITS_PER_LONG <= 32
+                               "       key      shmid perms       size  cpid  lpid nattch   uid   gid  cuid  cgid      atime      dtime      ctime        rss       swap\n",
+#else
+                               "       key      shmid perms                  size  cpid  lpid nattch   uid   gid  cuid  cgid      atime      dtime      ctime                   rss                  swap\n",
+#endif
                                IPC_SHM_IDS, sysvipc_shm_proc_show);
 }
 
@@ -543,6 +547,34 @@ static inline unsigned long copy_shminfo_to_user(void __user *buf, struct shminf
        }
 }
 
+/*
+ * Calculate and add used RSS and swap pages of a shm.
+ * Called with shm_ids.rw_mutex held as a reader
+ */
+static void shm_add_rss_swap(struct shmid_kernel *shp,
+       unsigned long *rss_add, unsigned long *swp_add)
+{
+       struct inode *inode;
+
+       inode = shp->shm_file->f_path.dentry->d_inode;
+
+       if (is_file_hugepages(shp->shm_file)) {
+               struct address_space *mapping = inode->i_mapping;
+               struct hstate *h = hstate_file(shp->shm_file);
+               *rss_add += pages_per_huge_page(h) * mapping->nrpages;
+       } else {
+#ifdef CONFIG_SHMEM
+               struct shmem_inode_info *info = SHMEM_I(inode);
+               spin_lock(&info->lock);
+               *rss_add += inode->i_mapping->nrpages;
+               *swp_add += info->swapped;
+               spin_unlock(&info->lock);
+#else
+               *rss_add += inode->i_mapping->nrpages;
+#endif
+       }
+}
+
 /*
  * Called with shm_ids.rw_mutex held as a reader
  */
@@ -560,30 +592,13 @@ static void shm_get_stat(struct ipc_namespace *ns, unsigned long *rss,
        for (total = 0, next_id = 0; total < in_use; next_id++) {
                struct kern_ipc_perm *ipc;
                struct shmid_kernel *shp;
-               struct inode *inode;
 
                ipc = idr_find(&shm_ids(ns).ipcs_idr, next_id);
                if (ipc == NULL)
                        continue;
                shp = container_of(ipc, struct shmid_kernel, shm_perm);
 
-               inode = shp->shm_file->f_path.dentry->d_inode;
-
-               if (is_file_hugepages(shp->shm_file)) {
-                       struct address_space *mapping = inode->i_mapping;
-                       struct hstate *h = hstate_file(shp->shm_file);
-                       *rss += pages_per_huge_page(h) * mapping->nrpages;
-               } else {
-#ifdef CONFIG_SHMEM
-                       struct shmem_inode_info *info = SHMEM_I(inode);
-                       spin_lock(&info->lock);
-                       *rss += inode->i_mapping->nrpages;
-                       *swp += info->swapped;
-                       spin_unlock(&info->lock);
-#else
-                       *rss += inode->i_mapping->nrpages;
-#endif
-               }
+               shm_add_rss_swap(shp, rss, swp);
 
                total++;
        }
@@ -1072,6 +1087,9 @@ SYSCALL_DEFINE1(shmdt, char __user *, shmaddr)
 static int sysvipc_shm_proc_show(struct seq_file *s, void *it)
 {
        struct shmid_kernel *shp = it;
+       unsigned long rss = 0, swp = 0;
+
+       shm_add_rss_swap(shp, &rss, &swp);
 
 #if BITS_PER_LONG <= 32
 #define SIZE_SPEC "%10lu"
@@ -1081,7 +1099,8 @@ static int sysvipc_shm_proc_show(struct seq_file *s, void *it)
 
        return seq_printf(s,
                          "%10d %10d  %4o " SIZE_SPEC " %5u %5u  "
-                         "%5lu %5u %5u %5u %5u %10lu %10lu %10lu\n",
+                         "%5lu %5u %5u %5u %5u %10lu %10lu %10lu "
+                         SIZE_SPEC " " SIZE_SPEC "\n",
                          shp->shm_perm.key,
                          shp->shm_perm.id,
                          shp->shm_perm.mode,
@@ -1095,6 +1114,8 @@ static int sysvipc_shm_proc_show(struct seq_file *s, void *it)
                          shp->shm_perm.cgid,
                          shp->shm_atim,
                          shp->shm_dtim,
-                         shp->shm_ctim);
+                         shp->shm_ctim,
+                         rss * PAGE_SIZE,
+                         swp * PAGE_SIZE);
 }
 #endif
index 9270d53..5cf3669 100644 (file)
@@ -243,6 +243,11 @@ static int notify_on_release(const struct cgroup *cgrp)
        return test_bit(CGRP_NOTIFY_ON_RELEASE, &cgrp->flags);
 }
 
+static int clone_children(const struct cgroup *cgrp)
+{
+       return test_bit(CGRP_CLONE_CHILDREN, &cgrp->flags);
+}
+
 /*
  * for_each_subsys() allows you to iterate on each subsystem attached to
  * an active hierarchy
@@ -1040,6 +1045,8 @@ static int cgroup_show_options(struct seq_file *seq, struct vfsmount *vfs)
                seq_puts(seq, ",noprefix");
        if (strlen(root->release_agent_path))
                seq_printf(seq, ",release_agent=%s", root->release_agent_path);
+       if (clone_children(&root->top_cgroup))
+               seq_puts(seq, ",clone_children");
        if (strlen(root->name))
                seq_printf(seq, ",name=%s", root->name);
        mutex_unlock(&cgroup_mutex);
@@ -1050,6 +1057,7 @@ struct cgroup_sb_opts {
        unsigned long subsys_bits;
        unsigned long flags;
        char *release_agent;
+       bool clone_children;
        char *name;
        /* User explicitly requested empty subsystem */
        bool none;
@@ -1066,7 +1074,8 @@ struct cgroup_sb_opts {
  */
 static int parse_cgroupfs_options(char *data, struct cgroup_sb_opts *opts)
 {
-       char *token, *o = data ?: "all";
+       char *token, *o = data;
+       bool all_ss = false, one_ss = false;
        unsigned long mask = (unsigned long)-1;
        int i;
        bool module_pin_failed = false;
@@ -1082,22 +1091,27 @@ static int parse_cgroupfs_options(char *data, struct cgroup_sb_opts *opts)
        while ((token = strsep(&o, ",")) != NULL) {
                if (!*token)
                        return -EINVAL;
-               if (!strcmp(token, "all")) {
-                       /* Add all non-disabled subsystems */
-                       opts->subsys_bits = 0;
-                       for (i = 0; i < CGROUP_SUBSYS_COUNT; i++) {
-                               struct cgroup_subsys *ss = subsys[i];
-                               if (ss == NULL)
-                                       continue;
-                               if (!ss->disabled)
-                                       opts->subsys_bits |= 1ul << i;
-                       }
-               } else if (!strcmp(token, "none")) {
+               if (!strcmp(token, "none")) {
                        /* Explicitly have no subsystems */
                        opts->none = true;
-               } else if (!strcmp(token, "noprefix")) {
+                       continue;
+               }
+               if (!strcmp(token, "all")) {
+                       /* Mutually exclusive option 'all' + subsystem name */
+                       if (one_ss)
+                               return -EINVAL;
+                       all_ss = true;
+                       continue;
+               }
+               if (!strcmp(token, "noprefix")) {
                        set_bit(ROOT_NOPREFIX, &opts->flags);
-               } else if (!strncmp(token, "release_agent=", 14)) {
+                       continue;
+               }
+               if (!strcmp(token, "clone_children")) {
+                       opts->clone_children = true;
+                       continue;
+               }
+               if (!strncmp(token, "release_agent=", 14)) {
                        /* Specifying two release agents is forbidden */
                        if (opts->release_agent)
                                return -EINVAL;
@@ -1105,7 +1119,9 @@ static int parse_cgroupfs_options(char *data, struct cgroup_sb_opts *opts)
                                kstrndup(token + 14, PATH_MAX - 1, GFP_KERNEL);
                        if (!opts->release_agent)
                                return -ENOMEM;
-               } else if (!strncmp(token, "name=", 5)) {
+                       continue;
+               }
+               if (!strncmp(token, "name=", 5)) {
                        const char *name = token + 5;
                        /* Can't specify an empty name */
                        if (!strlen(name))
@@ -1127,20 +1143,44 @@ static int parse_cgroupfs_options(char *data, struct cgroup_sb_opts *opts)
                                              GFP_KERNEL);
                        if (!opts->name)
                                return -ENOMEM;
-               } else {
-                       struct cgroup_subsys *ss;
-                       for (i = 0; i < CGROUP_SUBSYS_COUNT; i++) {
-                               ss = subsys[i];
-                               if (ss == NULL)
-                                       continue;
-                               if (!strcmp(token, ss->name)) {
-                                       if (!ss->disabled)
-                                               set_bit(i, &opts->subsys_bits);
-                                       break;
-                               }
-                       }
-                       if (i == CGROUP_SUBSYS_COUNT)
-                               return -ENOENT;
+
+                       continue;
+               }
+
+               for (i = 0; i < CGROUP_SUBSYS_COUNT; i++) {
+                       struct cgroup_subsys *ss = subsys[i];
+                       if (ss == NULL)
+                               continue;
+                       if (strcmp(token, ss->name))
+                               continue;
+                       if (ss->disabled)
+                               continue;
+
+                       /* Mutually exclusive option 'all' + subsystem name */
+                       if (all_ss)
+                               return -EINVAL;
+                       set_bit(i, &opts->subsys_bits);
+                       one_ss = true;
+
+                       break;
+               }
+               if (i == CGROUP_SUBSYS_COUNT)
+                       return -ENOENT;
+       }
+
+       /*
+        * If the 'all' option was specified select all the subsystems,
+        * otherwise 'all, 'none' and a subsystem name options were not
+        * specified, let's default to 'all'
+        */
+       if (all_ss || (!all_ss && !one_ss && !opts->none)) {
+               for (i = 0; i < CGROUP_SUBSYS_COUNT; i++) {
+                       struct cgroup_subsys *ss = subsys[i];
+                       if (ss == NULL)
+                               continue;
+                       if (ss->disabled)
+                               continue;
+                       set_bit(i, &opts->subsys_bits);
                }
        }
 
@@ -1355,6 +1395,8 @@ static struct cgroupfs_root *cgroup_root_from_opts(struct cgroup_sb_opts *opts)
                strcpy(root->release_agent_path, opts->release_agent);
        if (opts->name)
                strcpy(root->name, opts->name);
+       if (opts->clone_children)
+               set_bit(CGRP_CLONE_CHILDREN, &root->top_cgroup.flags);
        return root;
 }
 
@@ -1880,6 +1922,8 @@ static int cgroup_release_agent_write(struct cgroup *cgrp, struct cftype *cft,
                                      const char *buffer)
 {
        BUILD_BUG_ON(sizeof(cgrp->root->release_agent_path) < PATH_MAX);
+       if (strlen(buffer) >= PATH_MAX)
+               return -EINVAL;
        if (!cgroup_lock_live_group(cgrp))
                return -ENODEV;
        strcpy(cgrp->root->release_agent_path, buffer);
@@ -3173,6 +3217,23 @@ fail:
        return ret;
 }
 
+static u64 cgroup_clone_children_read(struct cgroup *cgrp,
+                                   struct cftype *cft)
+{
+       return clone_children(cgrp);
+}
+
+static int cgroup_clone_children_write(struct cgroup *cgrp,
+                                    struct cftype *cft,
+                                    u64 val)
+{
+       if (val)
+               set_bit(CGRP_CLONE_CHILDREN, &cgrp->flags);
+       else
+               clear_bit(CGRP_CLONE_CHILDREN, &cgrp->flags);
+       return 0;
+}
+
 /*
  * for the common functions, 'private' gives the type of file
  */
@@ -3203,6 +3264,11 @@ static struct cftype files[] = {
                .write_string = cgroup_write_event_control,
                .mode = S_IWUGO,
        },
+       {
+               .name = "cgroup.clone_children",
+               .read_u64 = cgroup_clone_children_read,
+               .write_u64 = cgroup_clone_children_write,
+       },
 };
 
 static struct cftype cft_release_agent = {
@@ -3332,6 +3398,9 @@ static long cgroup_create(struct cgroup *parent, struct dentry *dentry,
        if (notify_on_release(parent))
                set_bit(CGRP_NOTIFY_ON_RELEASE, &cgrp->flags);
 
+       if (clone_children(parent))
+               set_bit(CGRP_CLONE_CHILDREN, &cgrp->flags);
+
        for_each_subsys(root, ss) {
                struct cgroup_subsys_state *css = ss->create(ss, cgrp);
 
@@ -3346,6 +3415,8 @@ static long cgroup_create(struct cgroup *parent, struct dentry *dentry,
                                goto err_destroy;
                }
                /* At error, ->destroy() callback has to free assigned ID. */
+               if (clone_children(parent) && ss->post_clone)
+                       ss->post_clone(ss, cgrp);
        }
 
        cgroup_lock_hierarchy(root);
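
With the new cgroup.clone_children flag, cgroup_create() now calls a subsystem's post_clone() callback for every child made under a flagged parent, which is how cpuset-style "inherit the parent's configuration" behaviour is requested from userspace. A minimal sketch of a subsystem wiring that callback, assuming the 2.6.36-era cgroup_subsys layout; the subsystem itself is hypothetical and its mandatory callbacks are omitted:

#include <linux/cgroup.h>

static void demo_post_clone(struct cgroup_subsys *ss, struct cgroup *cgrp)
{
	/*
	 * Copy whatever configuration makes sense from cgrp's parent into
	 * the freshly created cgrp, mirroring what cpuset does here.
	 */
}

struct cgroup_subsys demo_subsys = {
	.name		= "demo",
	.post_clone	= demo_post_clone,
	/* .create, .destroy, .subsys_id etc. omitted for brevity */
};
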
index ce71ed5..e7bebb7 100644 (file)
@@ -48,20 +48,19 @@ static inline struct freezer *task_freezer(struct task_struct *task)
                            struct freezer, css);
 }
 
-int cgroup_freezing_or_frozen(struct task_struct *task)
+static inline int __cgroup_freezing_or_frozen(struct task_struct *task)
 {
-       struct freezer *freezer;
-       enum freezer_state state;
+       enum freezer_state state = task_freezer(task)->state;
+       return (state == CGROUP_FREEZING) || (state == CGROUP_FROZEN);
+}
 
+int cgroup_freezing_or_frozen(struct task_struct *task)
+{
+       int result;
        task_lock(task);
-       freezer = task_freezer(task);
-       if (!freezer->css.cgroup->parent)
-               state = CGROUP_THAWED; /* root cgroup can't be frozen */
-       else
-               state = freezer->state;
+       result = __cgroup_freezing_or_frozen(task);
        task_unlock(task);
-
-       return (state == CGROUP_FREEZING) || (state == CGROUP_FROZEN);
+       return result;
 }
 
 /*
@@ -154,13 +153,6 @@ static void freezer_destroy(struct cgroup_subsys *ss,
        kfree(cgroup_freezer(cgroup));
 }
 
-/* Task is frozen or will freeze immediately when next it gets woken */
-static bool is_task_frozen_enough(struct task_struct *task)
-{
-       return frozen(task) ||
-               (task_is_stopped_or_traced(task) && freezing(task));
-}
-
 /*
  * The call to cgroup_lock() in the freezer.state write method prevents
  * a write to that file racing against an attach, and hence the
@@ -174,24 +166,25 @@ static int freezer_can_attach(struct cgroup_subsys *ss,
 
        /*
         * Anything frozen can't move or be moved to/from.
-        *
-        * Since orig_freezer->state == FROZEN means that @task has been
-        * frozen, so it's sufficient to check the latter condition.
         */
 
-       if (is_task_frozen_enough(task))
+       freezer = cgroup_freezer(new_cgroup);
+       if (freezer->state != CGROUP_THAWED)
                return -EBUSY;
 
-       freezer = cgroup_freezer(new_cgroup);
-       if (freezer->state == CGROUP_FROZEN)
+       rcu_read_lock();
+       if (__cgroup_freezing_or_frozen(task)) {
+               rcu_read_unlock();
                return -EBUSY;
+       }
+       rcu_read_unlock();
 
        if (threadgroup) {
                struct task_struct *c;
 
                rcu_read_lock();
                list_for_each_entry_rcu(c, &task->thread_group, thread_group) {
-                       if (is_task_frozen_enough(c)) {
+                       if (__cgroup_freezing_or_frozen(c)) {
                                rcu_read_unlock();
                                return -EBUSY;
                        }
@@ -236,31 +229,30 @@ static void freezer_fork(struct cgroup_subsys *ss, struct task_struct *task)
 /*
  * caller must hold freezer->lock
  */
-static void update_freezer_state(struct cgroup *cgroup,
+static void update_if_frozen(struct cgroup *cgroup,
                                 struct freezer *freezer)
 {
        struct cgroup_iter it;
        struct task_struct *task;
        unsigned int nfrozen = 0, ntotal = 0;
+       enum freezer_state old_state = freezer->state;
 
        cgroup_iter_start(cgroup, &it);
        while ((task = cgroup_iter_next(cgroup, &it))) {
                ntotal++;
-               if (is_task_frozen_enough(task))
+               if (frozen(task))
                        nfrozen++;
        }
 
-       /*
-        * Transition to FROZEN when no new tasks can be added ensures
-        * that we never exist in the FROZEN state while there are unfrozen
-        * tasks.
-        */
-       if (nfrozen == ntotal)
-               freezer->state = CGROUP_FROZEN;
-       else if (nfrozen > 0)
-               freezer->state = CGROUP_FREEZING;
-       else
-               freezer->state = CGROUP_THAWED;
+       if (old_state == CGROUP_THAWED) {
+               BUG_ON(nfrozen > 0);
+       } else if (old_state == CGROUP_FREEZING) {
+               if (nfrozen == ntotal)
+                       freezer->state = CGROUP_FROZEN;
+       } else { /* old_state == CGROUP_FROZEN */
+               BUG_ON(nfrozen != ntotal);
+       }
+
        cgroup_iter_end(cgroup, &it);
 }
 
@@ -279,7 +271,7 @@ static int freezer_read(struct cgroup *cgroup, struct cftype *cft,
        if (state == CGROUP_FREEZING) {
                /* We change from FREEZING to FROZEN lazily if the cgroup was
                 * only partially frozen when we exited write. */
-               update_freezer_state(cgroup, freezer);
+               update_if_frozen(cgroup, freezer);
                state = freezer->state;
        }
        spin_unlock_irq(&freezer->lock);
@@ -301,7 +293,7 @@ static int try_to_freeze_cgroup(struct cgroup *cgroup, struct freezer *freezer)
        while ((task = cgroup_iter_next(cgroup, &it))) {
                if (!freeze_task(task, true))
                        continue;
-               if (is_task_frozen_enough(task))
+               if (frozen(task))
                        continue;
                if (!freezing(task) && !freezer_should_skip(task))
                        num_cant_freeze_now++;
@@ -335,7 +327,7 @@ static int freezer_change_state(struct cgroup *cgroup,
 
        spin_lock_irq(&freezer->lock);
 
-       update_freezer_state(cgroup, freezer);
+       update_if_frozen(cgroup, freezer);
        if (goal_state == freezer->state)
                goto out;
 
index 9a3e226..6a1aa00 100644 (file)
@@ -325,7 +325,7 @@ EXPORT_SYMBOL(prepare_creds);
 
 /*
  * Prepare credentials for current to perform an execve()
- * - The caller must hold current->cred_guard_mutex
+ * - The caller must hold ->cred_guard_mutex
  */
 struct cred *prepare_exec_creds(void)
 {
@@ -384,8 +384,6 @@ int copy_creds(struct task_struct *p, unsigned long clone_flags)
        struct cred *new;
        int ret;
 
-       mutex_init(&p->cred_guard_mutex);
-
        if (
 #ifdef CONFIG_KEYS
                !p->cred->thread_keyring &&
index 894179a..b194feb 100644 (file)
@@ -703,6 +703,8 @@ static void exit_mm(struct task_struct * tsk)
  * space.
  */
 static struct task_struct *find_new_reaper(struct task_struct *father)
+       __releases(&tasklist_lock)
+       __acquires(&tasklist_lock)
 {
        struct pid_namespace *pid_ns = task_active_pid_ns(father);
        struct task_struct *thread;
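
The hunk above (and the later exit_ptrace()/ptrace_stop() hunks) only adds sparse lock-context annotations: the function is entered with tasklist_lock held, may drop it, and re-takes it before returning. A minimal standalone illustration of the pattern, with the annotations stubbed out so it builds outside the kernel tree (in the kernel they come from the compiler-annotation headers and only mean something to the sparse checker):

    #include <pthread.h>

    #define __releases(x)   /* stubbed: only meaningful to the sparse checker */
    #define __acquires(x)

    static pthread_mutex_t tasklist_lock = PTHREAD_MUTEX_INITIALIZER;

    /* Called with tasklist_lock held; drops and re-takes it, which is exactly
     * what the two annotations document for static checkers. */
    static void reap_children(void)
        __releases(&tasklist_lock)
        __acquires(&tasklist_lock)
    {
        pthread_mutex_unlock(&tasklist_lock);
        /* ... work that must run without the lock ... */
        pthread_mutex_lock(&tasklist_lock);
    }

    int main(void)
    {
        pthread_mutex_lock(&tasklist_lock);
        reap_children();
        pthread_mutex_unlock(&tasklist_lock);
        return 0;
    }
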
index e87aaaa..3b159c5 100644 (file)
@@ -908,6 +908,8 @@ static int copy_signal(unsigned long clone_flags, struct task_struct *tsk)
        sig->oom_adj = current->signal->oom_adj;
        sig->oom_score_adj = current->signal->oom_score_adj;
 
+       mutex_init(&sig->cred_guard_mutex);
+
        return 0;
 }
 
index 9d917ff..9988d03 100644 (file)
@@ -393,3 +393,18 @@ unsigned int kstat_irqs_cpu(unsigned int irq, int cpu)
        struct irq_desc *desc = irq_to_desc(irq);
        return desc ? desc->kstat_irqs[cpu] : 0;
 }
+
+#ifdef CONFIG_GENERIC_HARDIRQS
+unsigned int kstat_irqs(unsigned int irq)
+{
+       struct irq_desc *desc = irq_to_desc(irq);
+       int cpu;
+       int sum = 0;
+
+       if (!desc)
+               return 0;
+       for_each_possible_cpu(cpu)
+               sum += desc->kstat_irqs[cpu];
+       return sum;
+}
+#endif /* CONFIG_GENERIC_HARDIRQS */
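
The new kstat_irqs() is just a per-CPU summation for a single IRQ. A minimal userspace sketch of that summation (NR_CPUS, the struct layout and main() are illustrative only, not the kernel definitions):

    #include <stdio.h>

    #define NR_CPUS 4

    struct irq_desc { unsigned int kstat_irqs[NR_CPUS]; };

    static unsigned int kstat_irqs_sum(const struct irq_desc *desc)
    {
        unsigned int sum = 0;
        int cpu;

        if (!desc)
            return 0;                       /* unknown IRQ: report zero */
        for (cpu = 0; cpu < NR_CPUS; cpu++)
            sum += desc->kstat_irqs[cpu];   /* one counter slot per possible CPU */
        return sum;
    }

    int main(void)
    {
        struct irq_desc timer = { .kstat_irqs = { 10, 3, 0, 7 } };
        printf("%u\n", kstat_irqs_sum(&timer));    /* 20 */
        return 0;
    }
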
index 56a8919..99865c3 100644 (file)
@@ -74,7 +74,8 @@ static struct hlist_head kretprobe_inst_table[KPROBE_TABLE_SIZE];
 /* NOTE: change this value only with kprobe_mutex held */
 static bool kprobes_all_disarmed;
 
-static DEFINE_MUTEX(kprobe_mutex);     /* Protects kprobe_table */
+/* This protects kprobe_table and optimizing_list */
+static DEFINE_MUTEX(kprobe_mutex);
 static DEFINE_PER_CPU(struct kprobe *, kprobe_instance) = NULL;
 static struct {
        spinlock_t lock ____cacheline_aligned_in_smp;
@@ -595,6 +596,7 @@ static __kprobes void try_to_optimize_kprobe(struct kprobe *p)
 }
 
 #ifdef CONFIG_SYSCTL
+/* This should be called with kprobe_mutex locked */
 static void __kprobes optimize_all_kprobes(void)
 {
        struct hlist_head *head;
@@ -607,17 +609,16 @@ static void __kprobes optimize_all_kprobes(void)
                return;
 
        kprobes_allow_optimization = true;
-       mutex_lock(&text_mutex);
        for (i = 0; i < KPROBE_TABLE_SIZE; i++) {
                head = &kprobe_table[i];
                hlist_for_each_entry_rcu(p, node, head, hlist)
                        if (!kprobe_disabled(p))
                                optimize_kprobe(p);
        }
-       mutex_unlock(&text_mutex);
        printk(KERN_INFO "Kprobes globally optimized\n");
 }
 
+/* This should be called with kprobe_mutex locked */
 static void __kprobes unoptimize_all_kprobes(void)
 {
        struct hlist_head *head;
index 2df4630..437a74a 100644 (file)
@@ -2037,7 +2037,7 @@ static inline void layout_symtab(struct module *mod, struct load_info *info)
 {
 }
 
-static void add_kallsyms(struct module *mod, struct load_info *info)
+static void add_kallsyms(struct module *mod, const struct load_info *info)
 {
 }
 #endif /* CONFIG_KALLSYMS */
index 2a5dfec..2c98ad9 100644 (file)
@@ -85,6 +85,14 @@ static struct cgroup_subsys_state *ns_create(struct cgroup_subsys *ss,
                return ERR_PTR(-EPERM);
        if (!cgroup_is_descendant(cgroup, current))
                return ERR_PTR(-EPERM);
+       if (test_bit(CGRP_CLONE_CHILDREN, &cgroup->flags)) {
+               printk("ns_cgroup can't be created with parent "
+                      "'clone_children' set.\n");
+               return ERR_PTR(-EINVAL);
+       }
+
+       printk_once("ns_cgroup deprecated: consider using the "
+                   "'clone_children' flag without the ns_cgroup.\n");
 
        ns_cgroup = kzalloc(sizeof(*ns_cgroup), GFP_KERNEL);
        if (!ns_cgroup)
index f309e80..517d827 100644 (file)
@@ -417,8 +417,8 @@ event_filter_match(struct perf_event *event)
        return event->cpu == -1 || event->cpu == smp_processor_id();
 }
 
-static int
-__event_sched_out(struct perf_event *event,
+static void
+event_sched_out(struct perf_event *event,
                  struct perf_cpu_context *cpuctx,
                  struct perf_event_context *ctx)
 {
@@ -437,13 +437,14 @@ __event_sched_out(struct perf_event *event,
        }
 
        if (event->state != PERF_EVENT_STATE_ACTIVE)
-               return 0;
+               return;
 
        event->state = PERF_EVENT_STATE_INACTIVE;
        if (event->pending_disable) {
                event->pending_disable = 0;
                event->state = PERF_EVENT_STATE_OFF;
        }
+       event->tstamp_stopped = ctx->time;
        event->pmu->del(event, 0);
        event->oncpu = -1;
 
@@ -452,19 +453,6 @@ __event_sched_out(struct perf_event *event,
        ctx->nr_active--;
        if (event->attr.exclusive || !cpuctx->active_oncpu)
                cpuctx->exclusive = 0;
-       return 1;
-}
-
-static void
-event_sched_out(struct perf_event *event,
-                 struct perf_cpu_context *cpuctx,
-                 struct perf_event_context *ctx)
-{
-       int ret;
-
-       ret = __event_sched_out(event, cpuctx, ctx);
-       if (ret)
-               event->tstamp_stopped = ctx->time;
 }
 
 static void
@@ -664,7 +652,7 @@ retry:
 }
 
 static int
-__event_sched_in(struct perf_event *event,
+event_sched_in(struct perf_event *event,
                 struct perf_cpu_context *cpuctx,
                 struct perf_event_context *ctx)
 {
@@ -684,6 +672,8 @@ __event_sched_in(struct perf_event *event,
                return -EAGAIN;
        }
 
+       event->tstamp_running += ctx->time - event->tstamp_stopped;
+
        if (!is_software_event(event))
                cpuctx->active_oncpu++;
        ctx->nr_active++;
@@ -694,35 +684,6 @@ __event_sched_in(struct perf_event *event,
        return 0;
 }
 
-static inline int
-event_sched_in(struct perf_event *event,
-                struct perf_cpu_context *cpuctx,
-                struct perf_event_context *ctx)
-{
-       int ret = __event_sched_in(event, cpuctx, ctx);
-       if (ret)
-               return ret;
-       event->tstamp_running += ctx->time - event->tstamp_stopped;
-       return 0;
-}
-
-static void
-group_commit_event_sched_in(struct perf_event *group_event,
-              struct perf_cpu_context *cpuctx,
-              struct perf_event_context *ctx)
-{
-       struct perf_event *event;
-       u64 now = ctx->time;
-
-       group_event->tstamp_running += now - group_event->tstamp_stopped;
-       /*
-        * Schedule in siblings as one group (if any):
-        */
-       list_for_each_entry(event, &group_event->sibling_list, group_entry) {
-               event->tstamp_running += now - event->tstamp_stopped;
-       }
-}
-
 static int
 group_sched_in(struct perf_event *group_event,
               struct perf_cpu_context *cpuctx,
@@ -730,19 +691,15 @@ group_sched_in(struct perf_event *group_event,
 {
        struct perf_event *event, *partial_group = NULL;
        struct pmu *pmu = group_event->pmu;
+       u64 now = ctx->time;
+       bool simulate = false;
 
        if (group_event->state == PERF_EVENT_STATE_OFF)
                return 0;
 
        pmu->start_txn(pmu);
 
-       /*
-        * use __event_sched_in() to delay updating tstamp_running
-        * until the transaction is committed. In case of failure
-        * we will keep an unmodified tstamp_running which is a
-        * requirement to get correct timing information
-        */
-       if (__event_sched_in(group_event, cpuctx, ctx)) {
+       if (event_sched_in(group_event, cpuctx, ctx)) {
                pmu->cancel_txn(pmu);
                return -EAGAIN;
        }
@@ -751,31 +708,42 @@ group_sched_in(struct perf_event *group_event,
         * Schedule in siblings as one group (if any):
         */
        list_for_each_entry(event, &group_event->sibling_list, group_entry) {
-               if (__event_sched_in(event, cpuctx, ctx)) {
+               if (event_sched_in(event, cpuctx, ctx)) {
                        partial_group = event;
                        goto group_error;
                }
        }
 
-       if (!pmu->commit_txn(pmu)) {
-               /* commit tstamp_running */
-               group_commit_event_sched_in(group_event, cpuctx, ctx);
+       if (!pmu->commit_txn(pmu))
                return 0;
-       }
+
 group_error:
        /*
         * Groups can be scheduled in as one unit only, so undo any
         * partial group before returning:
+        * The events up to the failed event are scheduled out normally, and
+        * their tstamp_stopped will be updated.
         *
-        * use __event_sched_out() to avoid updating tstamp_stopped
-        * because the event never actually ran
+        * The failed events and the remaining siblings need to have
+        * their timings updated as if they had gone through event_sched_in()
+        * and event_sched_out(). This is required to get consistent timings
+        * across the group. This also takes care of the case where the group
+        * could never be scheduled by ensuring tstamp_stopped is set to mark
+        * the time the event was actually stopped, such that time delta
+        * calculation in update_event_times() is correct.
         */
        list_for_each_entry(event, &group_event->sibling_list, group_entry) {
                if (event == partial_group)
-                       break;
-               __event_sched_out(event, cpuctx, ctx);
+                       simulate = true;
+
+               if (simulate) {
+                       event->tstamp_running += now - event->tstamp_stopped;
+                       event->tstamp_stopped = now;
+               } else {
+                       event_sched_out(event, cpuctx, ctx);
+               }
        }
-       __event_sched_out(group_event, cpuctx, ctx);
+       event_sched_out(group_event, cpuctx, ctx);
 
        pmu->cancel_txn(pmu);
 
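
The group_error path above has two regimes: siblings that really got scheduled in are torn down through event_sched_out() (which now stamps tstamp_stopped), while the failed sibling and everything after it never ran, so their timestamps are simulated to keep update_event_times() consistent. A toy model of that split (the struct and helper below are illustrative, not the perf code):

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    struct event { uint64_t tstamp_running, tstamp_stopped; };

    static void undo_group(struct event *ev, int nr, int failed_idx, uint64_t now)
    {
        bool simulate = false;
        int i;

        for (i = 0; i < nr; i++) {
            if (i == failed_idx)
                simulate = true;

            if (simulate) {
                /* never ran: pretend it was scheduled in and out at 'now' */
                ev[i].tstamp_running += now - ev[i].tstamp_stopped;
                ev[i].tstamp_stopped = now;
            } else {
                /* really ran: a normal sched-out stamps 'now' here */
                ev[i].tstamp_stopped = now;
            }
        }
    }

    int main(void)
    {
        struct event sib[3] = { {0, 100}, {0, 100}, {0, 100} };

        undo_group(sib, 3, 1, 150);        /* sibling 1 failed to schedule in */
        printf("%llu %llu\n",
               (unsigned long long)sib[1].tstamp_running,
               (unsigned long long)sib[1].tstamp_stopped);   /* 50 150 */
        return 0;
    }
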
index f34d798..99bbaa3 100644 (file)
@@ -181,7 +181,7 @@ int ptrace_attach(struct task_struct *task)
         * under ptrace.
         */
        retval = -ERESTARTNOINTR;
-       if (mutex_lock_interruptible(&task->cred_guard_mutex))
+       if (mutex_lock_interruptible(&task->signal->cred_guard_mutex))
                goto out;
 
        task_lock(task);
@@ -208,7 +208,7 @@ int ptrace_attach(struct task_struct *task)
 unlock_tasklist:
        write_unlock_irq(&tasklist_lock);
 unlock_creds:
-       mutex_unlock(&task->cred_guard_mutex);
+       mutex_unlock(&task->signal->cred_guard_mutex);
 out:
        return retval;
 }
@@ -329,6 +329,8 @@ int ptrace_detach(struct task_struct *child, unsigned int data)
  * and reacquire the lock.
  */
 void exit_ptrace(struct task_struct *tracer)
+       __releases(&tasklist_lock)
+       __acquires(&tasklist_lock)
 {
        struct task_struct *p, *n;
        LIST_HEAD(ptrace_dead);
@@ -402,7 +404,7 @@ int ptrace_writedata(struct task_struct *tsk, char __user *src, unsigned long ds
        return copied;
 }
 
-static int ptrace_setoptions(struct task_struct *child, long data)
+static int ptrace_setoptions(struct task_struct *child, unsigned long data)
 {
        child->ptrace &= ~PT_TRACE_MASK;
 
@@ -481,7 +483,8 @@ static int ptrace_setsiginfo(struct task_struct *child, const siginfo_t *info)
 #define is_sysemu_singlestep(request)  0
 #endif
 
-static int ptrace_resume(struct task_struct *child, long request, long data)
+static int ptrace_resume(struct task_struct *child, long request,
+                        unsigned long data)
 {
        if (!valid_signal(data))
                return -EIO;
@@ -558,10 +561,12 @@ static int ptrace_regset(struct task_struct *task, int req, unsigned int type,
 #endif
 
 int ptrace_request(struct task_struct *child, long request,
-                  long addr, long data)
+                  unsigned long addr, unsigned long data)
 {
        int ret = -EIO;
        siginfo_t siginfo;
+       void __user *datavp = (void __user *) data;
+       unsigned long __user *datalp = datavp;
 
        switch (request) {
        case PTRACE_PEEKTEXT:
@@ -578,19 +583,17 @@ int ptrace_request(struct task_struct *child, long request,
                ret = ptrace_setoptions(child, data);
                break;
        case PTRACE_GETEVENTMSG:
-               ret = put_user(child->ptrace_message, (unsigned long __user *) data);
+               ret = put_user(child->ptrace_message, datalp);
                break;
 
        case PTRACE_GETSIGINFO:
                ret = ptrace_getsiginfo(child, &siginfo);
                if (!ret)
-                       ret = copy_siginfo_to_user((siginfo_t __user *) data,
-                                                  &siginfo);
+                       ret = copy_siginfo_to_user(datavp, &siginfo);
                break;
 
        case PTRACE_SETSIGINFO:
-               if (copy_from_user(&siginfo, (siginfo_t __user *) data,
-                                  sizeof siginfo))
+               if (copy_from_user(&siginfo, datavp, sizeof siginfo))
                        ret = -EFAULT;
                else
                        ret = ptrace_setsiginfo(child, &siginfo);
@@ -621,7 +624,7 @@ int ptrace_request(struct task_struct *child, long request,
                }
                mmput(mm);
 
-               ret = put_user(tmp, (unsigned long __user *) data);
+               ret = put_user(tmp, datalp);
                break;
        }
 #endif
@@ -650,7 +653,7 @@ int ptrace_request(struct task_struct *child, long request,
        case PTRACE_SETREGSET:
        {
                struct iovec kiov;
-               struct iovec __user *uiov = (struct iovec __user *) data;
+               struct iovec __user *uiov = datavp;
 
                if (!access_ok(VERIFY_WRITE, uiov, sizeof(*uiov)))
                        return -EFAULT;
@@ -691,7 +694,8 @@ static struct task_struct *ptrace_get_task_struct(pid_t pid)
 #define arch_ptrace_attach(child)      do { } while (0)
 #endif
 
-SYSCALL_DEFINE4(ptrace, long, request, long, pid, long, addr, long, data)
+SYSCALL_DEFINE4(ptrace, long, request, long, pid, unsigned long, addr,
+               unsigned long, data)
 {
        struct task_struct *child;
        long ret;
@@ -732,7 +736,8 @@ SYSCALL_DEFINE4(ptrace, long, request, long, pid, long, addr, long, data)
        return ret;
 }
 
-int generic_ptrace_peekdata(struct task_struct *tsk, long addr, long data)
+int generic_ptrace_peekdata(struct task_struct *tsk, unsigned long addr,
+                           unsigned long data)
 {
        unsigned long tmp;
        int copied;
@@ -743,7 +748,8 @@ int generic_ptrace_peekdata(struct task_struct *tsk, long addr, long data)
        return put_user(tmp, (unsigned long __user *)data);
 }
 
-int generic_ptrace_pokedata(struct task_struct *tsk, long addr, long data)
+int generic_ptrace_pokedata(struct task_struct *tsk, unsigned long addr,
+                           unsigned long data)
 {
        int copied;
 
index 7b36976..9c9841c 100644 (file)
@@ -453,6 +453,8 @@ static struct resource * __insert_resource(struct resource *parent, struct resou
 
                if (first == parent)
                        return first;
+               if (WARN_ON(first == new))      /* duplicated insertion */
+                       return first;
 
                if ((first->start > new->start) || (first->end < new->end))
                        break;
index 919562c..4e3cff1 100644 (file)
@@ -1105,7 +1105,8 @@ int zap_other_threads(struct task_struct *p)
        return count;
 }
 
-struct sighand_struct *lock_task_sighand(struct task_struct *tsk, unsigned long *flags)
+struct sighand_struct *__lock_task_sighand(struct task_struct *tsk,
+                                          unsigned long *flags)
 {
        struct sighand_struct *sighand;
 
@@ -1617,6 +1618,8 @@ static int sigkill_pending(struct task_struct *tsk)
  * is gone, we keep current->exit_code unless clear_code.
  */
 static void ptrace_stop(int exit_code, int clear_code, siginfo_t *info)
+       __releases(&current->sighand->siglock)
+       __acquires(&current->sighand->siglock)
 {
        if (arch_ptrace_stop_needed(exit_code, info)) {
                /*
index f02a9df..18f4be0 100644 (file)
@@ -229,18 +229,20 @@ restart:
 
        do {
                if (pending & 1) {
+                       unsigned int vec_nr = h - softirq_vec;
                        int prev_count = preempt_count();
-                       kstat_incr_softirqs_this_cpu(h - softirq_vec);
 
-                       trace_softirq_entry(h, softirq_vec);
+                       kstat_incr_softirqs_this_cpu(vec_nr);
+
+                       trace_softirq_entry(vec_nr);
                        h->action(h);
-                       trace_softirq_exit(h, softirq_vec);
+                       trace_softirq_exit(vec_nr);
                        if (unlikely(prev_count != preempt_count())) {
-                               printk(KERN_ERR "huh, entered softirq %td %s %p"
+                               printk(KERN_ERR "huh, entered softirq %u %s %p"
                                       "with preempt_count %08x,"
-                                      " exited with %08x?\n", h - softirq_vec,
-                                      softirq_to_name[h - softirq_vec],
-                                      h->action, prev_count, preempt_count());
+                                      " exited with %08x?\n", vec_nr,
+                                      softirq_to_name[vec_nr], h->action,
+                                      prev_count, preempt_count());
                                preempt_count() = prev_count;
                        }
 
index 11281d5..c8231fb 100644 (file)
@@ -175,22 +175,8 @@ static void send_cpu_listeners(struct sk_buff *skb,
        up_write(&listeners->sem);
 }
 
-static int fill_pid(pid_t pid, struct task_struct *tsk,
-               struct taskstats *stats)
+static void fill_stats(struct task_struct *tsk, struct taskstats *stats)
 {
-       int rc = 0;
-
-       if (!tsk) {
-               rcu_read_lock();
-               tsk = find_task_by_vpid(pid);
-               if (tsk)
-                       get_task_struct(tsk);
-               rcu_read_unlock();
-               if (!tsk)
-                       return -ESRCH;
-       } else
-               get_task_struct(tsk);
-
        memset(stats, 0, sizeof(*stats));
        /*
         * Each accounting subsystem adds calls to its functions to
@@ -209,17 +195,27 @@ static int fill_pid(pid_t pid, struct task_struct *tsk,
 
        /* fill in extended acct fields */
        xacct_add_tsk(stats, tsk);
+}
 
-       /* Define err: label here if needed */
-       put_task_struct(tsk);
-       return rc;
+static int fill_stats_for_pid(pid_t pid, struct taskstats *stats)
+{
+       struct task_struct *tsk;
 
+       rcu_read_lock();
+       tsk = find_task_by_vpid(pid);
+       if (tsk)
+               get_task_struct(tsk);
+       rcu_read_unlock();
+       if (!tsk)
+               return -ESRCH;
+       fill_stats(tsk, stats);
+       put_task_struct(tsk);
+       return 0;
 }
 
-static int fill_tgid(pid_t tgid, struct task_struct *first,
-               struct taskstats *stats)
+static int fill_stats_for_tgid(pid_t tgid, struct taskstats *stats)
 {
-       struct task_struct *tsk;
+       struct task_struct *tsk, *first;
        unsigned long flags;
        int rc = -ESRCH;
 
@@ -228,8 +224,7 @@ static int fill_tgid(pid_t tgid, struct task_struct *first,
         * leaders who are already counted with the dead tasks
         */
        rcu_read_lock();
-       if (!first)
-               first = find_task_by_vpid(tgid);
+       first = find_task_by_vpid(tgid);
 
        if (!first || !lock_task_sighand(first, &flags))
                goto out;
@@ -268,7 +263,6 @@ out:
        return rc;
 }
 
-
 static void fill_tgid_exit(struct task_struct *tsk)
 {
        unsigned long flags;
@@ -360,6 +354,12 @@ static struct taskstats *mk_reply(struct sk_buff *skb, int type, u32 pid)
        struct nlattr *na, *ret;
        int aggr;
 
+       /* If we don't pad, we end up with alignment on a 4 byte boundary.
+        * This causes lots of runtime warnings on systems requiring 8 byte
+        * alignment */
+       u32 pids[2] = { pid, 0 };
+       int pid_size = ALIGN(sizeof(pid), sizeof(long));
+
        aggr = (type == TASKSTATS_TYPE_PID)
                        ? TASKSTATS_TYPE_AGGR_PID
                        : TASKSTATS_TYPE_AGGR_TGID;
@@ -367,7 +367,7 @@ static struct taskstats *mk_reply(struct sk_buff *skb, int type, u32 pid)
        na = nla_nest_start(skb, aggr);
        if (!na)
                goto err;
-       if (nla_put(skb, type, sizeof(pid), &pid) < 0)
+       if (nla_put(skb, type, pid_size, pids) < 0)
                goto err;
        ret = nla_reserve(skb, TASKSTATS_TYPE_STATS, sizeof(struct taskstats));
        if (!ret)
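
The padding change above exists so that the struct taskstats attribute following the pid/tgid attribute stays 8-byte aligned inside the netlink message on 64-bit systems. A small sketch of the arithmetic only (the ALIGN() below reproduces the rounding, not the kernel macro verbatim):

    #include <stdio.h>

    /* Round x up to the next multiple of a; a must be a power of two. */
    #define ALIGN(x, a)  (((x) + (a) - 1) & ~((a) - 1))

    int main(void)
    {
        size_t pid_size = sizeof(unsigned int);              /* the raw u32 pid */
        size_t padded   = ALIGN(pid_size, sizeof(long));     /* 8 on 64-bit */

        printf("%zu -> %zu\n", pid_size, padded);            /* "4 -> 8" on LP64 */
        return 0;
    }
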
@@ -424,39 +424,46 @@ err:
        return rc;
 }
 
-static int taskstats_user_cmd(struct sk_buff *skb, struct genl_info *info)
+static int cmd_attr_register_cpumask(struct genl_info *info)
 {
-       int rc;
-       struct sk_buff *rep_skb;
-       struct taskstats *stats;
-       size_t size;
        cpumask_var_t mask;
+       int rc;
 
        if (!alloc_cpumask_var(&mask, GFP_KERNEL))
                return -ENOMEM;
-
        rc = parse(info->attrs[TASKSTATS_CMD_ATTR_REGISTER_CPUMASK], mask);
        if (rc < 0)
-               goto free_return_rc;
-       if (rc == 0) {
-               rc = add_del_listener(info->snd_pid, mask, REGISTER);
-               goto free_return_rc;
-       }
+               goto out;
+       rc = add_del_listener(info->snd_pid, mask, REGISTER);
+out:
+       free_cpumask_var(mask);
+       return rc;
+}
+
+static int cmd_attr_deregister_cpumask(struct genl_info *info)
+{
+       cpumask_var_t mask;
+       int rc;
 
+       if (!alloc_cpumask_var(&mask, GFP_KERNEL))
+               return -ENOMEM;
        rc = parse(info->attrs[TASKSTATS_CMD_ATTR_DEREGISTER_CPUMASK], mask);
        if (rc < 0)
-               goto free_return_rc;
-       if (rc == 0) {
-               rc = add_del_listener(info->snd_pid, mask, DEREGISTER);
-free_return_rc:
-               free_cpumask_var(mask);
-               return rc;
-       }
+               goto out;
+       rc = add_del_listener(info->snd_pid, mask, DEREGISTER);
+out:
        free_cpumask_var(mask);
+       return rc;
+}
+
+static int cmd_attr_pid(struct genl_info *info)
+{
+       struct taskstats *stats;
+       struct sk_buff *rep_skb;
+       size_t size;
+       u32 pid;
+       int rc;
 
-       /*
-        * Size includes space for nested attributes
-        */
        size = nla_total_size(sizeof(u32)) +
                nla_total_size(sizeof(struct taskstats)) + nla_total_size(0);
 
@@ -465,33 +472,64 @@ free_return_rc:
                return rc;
 
        rc = -EINVAL;
-       if (info->attrs[TASKSTATS_CMD_ATTR_PID]) {
-               u32 pid = nla_get_u32(info->attrs[TASKSTATS_CMD_ATTR_PID]);
-               stats = mk_reply(rep_skb, TASKSTATS_TYPE_PID, pid);
-               if (!stats)
-                       goto err;
-
-               rc = fill_pid(pid, NULL, stats);
-               if (rc < 0)
-                       goto err;
-       } else if (info->attrs[TASKSTATS_CMD_ATTR_TGID]) {
-               u32 tgid = nla_get_u32(info->attrs[TASKSTATS_CMD_ATTR_TGID]);
-               stats = mk_reply(rep_skb, TASKSTATS_TYPE_TGID, tgid);
-               if (!stats)
-                       goto err;
-
-               rc = fill_tgid(tgid, NULL, stats);
-               if (rc < 0)
-                       goto err;
-       } else
+       pid = nla_get_u32(info->attrs[TASKSTATS_CMD_ATTR_PID]);
+       stats = mk_reply(rep_skb, TASKSTATS_TYPE_PID, pid);
+       if (!stats)
+               goto err;
+
+       rc = fill_stats_for_pid(pid, stats);
+       if (rc < 0)
+               goto err;
+       return send_reply(rep_skb, info);
+err:
+       nlmsg_free(rep_skb);
+       return rc;
+}
+
+static int cmd_attr_tgid(struct genl_info *info)
+{
+       struct taskstats *stats;
+       struct sk_buff *rep_skb;
+       size_t size;
+       u32 tgid;
+       int rc;
+
+       size = nla_total_size(sizeof(u32)) +
+               nla_total_size(sizeof(struct taskstats)) + nla_total_size(0);
+
+       rc = prepare_reply(info, TASKSTATS_CMD_NEW, &rep_skb, size);
+       if (rc < 0)
+               return rc;
+
+       rc = -EINVAL;
+       tgid = nla_get_u32(info->attrs[TASKSTATS_CMD_ATTR_TGID]);
+       stats = mk_reply(rep_skb, TASKSTATS_TYPE_TGID, tgid);
+       if (!stats)
                goto err;
 
+       rc = fill_stats_for_tgid(tgid, stats);
+       if (rc < 0)
+               goto err;
        return send_reply(rep_skb, info);
 err:
        nlmsg_free(rep_skb);
        return rc;
 }
 
+static int taskstats_user_cmd(struct sk_buff *skb, struct genl_info *info)
+{
+       if (info->attrs[TASKSTATS_CMD_ATTR_REGISTER_CPUMASK])
+               return cmd_attr_register_cpumask(info);
+       else if (info->attrs[TASKSTATS_CMD_ATTR_DEREGISTER_CPUMASK])
+               return cmd_attr_deregister_cpumask(info);
+       else if (info->attrs[TASKSTATS_CMD_ATTR_PID])
+               return cmd_attr_pid(info);
+       else if (info->attrs[TASKSTATS_CMD_ATTR_TGID])
+               return cmd_attr_tgid(info);
+       else
+               return -EINVAL;
+}
+
 static struct taskstats *taskstats_tgid_alloc(struct task_struct *tsk)
 {
        struct signal_struct *sig = tsk->signal;
@@ -555,9 +593,7 @@ void taskstats_exit(struct task_struct *tsk, int group_dead)
        if (!stats)
                goto err;
 
-       rc = fill_pid(-1, tsk, stats);
-       if (rc < 0)
-               goto err;
+       fill_stats(tsk, stats);
 
        /*
         * Doesn't matter if tsk is the leader or the last group member leaving
index c3dab05..9ed509a 100644 (file)
@@ -224,6 +224,9 @@ enum {
        RB_LEN_TIME_STAMP = 16,
 };
 
+#define skip_time_extend(event) \
+       ((struct ring_buffer_event *)((char *)event + RB_LEN_TIME_EXTEND))
+
 static inline int rb_null_event(struct ring_buffer_event *event)
 {
        return event->type_len == RINGBUF_TYPE_PADDING && !event->time_delta;
@@ -248,8 +251,12 @@ rb_event_data_length(struct ring_buffer_event *event)
        return length + RB_EVNT_HDR_SIZE;
 }
 
-/* inline for ring buffer fast paths */
-static unsigned
+/*
+ * Return the length of the given event. Will return
+ * the length of the time extend if the event is a
+ * time extend.
+ */
+static inline unsigned
 rb_event_length(struct ring_buffer_event *event)
 {
        switch (event->type_len) {
@@ -274,13 +281,41 @@ rb_event_length(struct ring_buffer_event *event)
        return 0;
 }
 
+/*
+ * Return total length of time extend and data,
+ *   or just the event length for all other events.
+ */
+static inline unsigned
+rb_event_ts_length(struct ring_buffer_event *event)
+{
+       unsigned len = 0;
+
+       if (event->type_len == RINGBUF_TYPE_TIME_EXTEND) {
+               /* time extends include the data event after it */
+               len = RB_LEN_TIME_EXTEND;
+               event = skip_time_extend(event);
+       }
+       return len + rb_event_length(event);
+}
+
 /**
  * ring_buffer_event_length - return the length of the event
  * @event: the event to get the length of
+ *
+ * Returns the size of the data load of a data event.
+ * If the event is something other than a data event, it
+ * returns the size of the event itself. With the exception
+ * of a TIME EXTEND, where it still returns the size of the
+ * data load of the data event after it.
  */
 unsigned ring_buffer_event_length(struct ring_buffer_event *event)
 {
-       unsigned length = rb_event_length(event);
+       unsigned length;
+
+       if (event->type_len == RINGBUF_TYPE_TIME_EXTEND)
+               event = skip_time_extend(event);
+
+       length = rb_event_length(event);
        if (event->type_len > RINGBUF_TYPE_DATA_TYPE_LEN_MAX)
                return length;
        length -= RB_EVNT_HDR_SIZE;
@@ -294,6 +329,8 @@ EXPORT_SYMBOL_GPL(ring_buffer_event_length);
 static void *
 rb_event_data(struct ring_buffer_event *event)
 {
+       if (event->type_len == RINGBUF_TYPE_TIME_EXTEND)
+               event = skip_time_extend(event);
        BUG_ON(event->type_len > RINGBUF_TYPE_DATA_TYPE_LEN_MAX);
        /* If length is in len field, then array[0] has the data */
        if (event->type_len)
@@ -404,9 +441,6 @@ static inline int test_time_stamp(u64 delta)
 /* Max payload is BUF_PAGE_SIZE - header (8bytes) */
 #define BUF_MAX_DATA_SIZE (BUF_PAGE_SIZE - (sizeof(u32) * 2))
 
-/* Max number of timestamps that can fit on a page */
-#define RB_TIMESTAMPS_PER_PAGE (BUF_PAGE_SIZE / RB_LEN_TIME_EXTEND)
-
 int ring_buffer_print_page_header(struct trace_seq *s)
 {
        struct buffer_data_page field;
@@ -1546,6 +1580,25 @@ static void rb_inc_iter(struct ring_buffer_iter *iter)
        iter->head = 0;
 }
 
+/* Slow path, do not inline */
+static noinline struct ring_buffer_event *
+rb_add_time_stamp(struct ring_buffer_event *event, u64 delta)
+{
+       event->type_len = RINGBUF_TYPE_TIME_EXTEND;
+
+       /* Not the first event on the page? */
+       if (rb_event_index(event)) {
+               event->time_delta = delta & TS_MASK;
+               event->array[0] = delta >> TS_SHIFT;
+       } else {
+               /* nope, just zero it */
+               event->time_delta = 0;
+               event->array[0] = 0;
+       }
+
+       return skip_time_extend(event);
+}
+
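
rb_add_time_stamp() above splits an oversized delta across the event header: the low TS_SHIFT bits land in the time_delta field and the high bits in array[0]. A standalone sketch of that split and its inverse; the toy struct below is not the real ring_buffer_event, and the TS_SHIFT value of 27 is assumed from the event's 27-bit time_delta bitfield:

    #include <stdint.h>
    #include <stdio.h>

    #define TS_SHIFT  27
    #define TS_MASK   ((1ULL << TS_SHIFT) - 1)

    struct toy_event { uint32_t time_delta; uint32_t array0; };

    static void encode(struct toy_event *e, uint64_t delta)
    {
        e->time_delta = delta & TS_MASK;       /* low 27 bits */
        e->array0     = delta >> TS_SHIFT;     /* the rest */
    }

    static uint64_t decode(const struct toy_event *e)
    {
        return ((uint64_t)e->array0 << TS_SHIFT) + e->time_delta;
    }

    int main(void)
    {
        struct toy_event e;
        uint64_t delta = 123456789012ULL;      /* far too big for 27 bits */

        encode(&e, delta);
        printf("%llu\n", (unsigned long long)decode(&e));  /* 123456789012 */
        return 0;
    }
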
 /**
  * ring_buffer_update_event - update event type and data
  * @event: the event to update
@@ -1558,28 +1611,31 @@ static void rb_inc_iter(struct ring_buffer_iter *iter)
  * data field.
  */
 static void
-rb_update_event(struct ring_buffer_event *event,
-                        unsigned type, unsigned length)
+rb_update_event(struct ring_buffer_per_cpu *cpu_buffer,
+               struct ring_buffer_event *event, unsigned length,
+               int add_timestamp, u64 delta)
 {
-       event->type_len = type;
-
-       switch (type) {
-
-       case RINGBUF_TYPE_PADDING:
-       case RINGBUF_TYPE_TIME_EXTEND:
-       case RINGBUF_TYPE_TIME_STAMP:
-               break;
+       /* Only a commit updates the timestamp */
+       if (unlikely(!rb_event_is_commit(cpu_buffer, event)))
+               delta = 0;
 
-       case 0:
-               length -= RB_EVNT_HDR_SIZE;
-               if (length > RB_MAX_SMALL_DATA || RB_FORCE_8BYTE_ALIGNMENT)
-                       event->array[0] = length;
-               else
-                       event->type_len = DIV_ROUND_UP(length, RB_ALIGNMENT);
-               break;
-       default:
-               BUG();
+       /*
+        * If we need to add a timestamp, then we
+        * add it to the start of the reserved space.
+        */
+       if (unlikely(add_timestamp)) {
+               event = rb_add_time_stamp(event, delta);
+               length -= RB_LEN_TIME_EXTEND;
+               delta = 0;
        }
+
+       event->time_delta = delta;
+       length -= RB_EVNT_HDR_SIZE;
+       if (length > RB_MAX_SMALL_DATA || RB_FORCE_8BYTE_ALIGNMENT) {
+               event->type_len = 0;
+               event->array[0] = length;
+       } else
+               event->type_len = DIV_ROUND_UP(length, RB_ALIGNMENT);
 }
 
 /*
@@ -1823,10 +1879,13 @@ rb_reset_tail(struct ring_buffer_per_cpu *cpu_buffer,
        local_sub(length, &tail_page->write);
 }
 
-static struct ring_buffer_event *
+/*
+ * This is the slow path, force gcc not to inline it.
+ */
+static noinline struct ring_buffer_event *
 rb_move_tail(struct ring_buffer_per_cpu *cpu_buffer,
             unsigned long length, unsigned long tail,
-            struct buffer_page *tail_page, u64 *ts)
+            struct buffer_page *tail_page, u64 ts)
 {
        struct buffer_page *commit_page = cpu_buffer->commit_page;
        struct ring_buffer *buffer = cpu_buffer->buffer;
@@ -1909,8 +1968,8 @@ rb_move_tail(struct ring_buffer_per_cpu *cpu_buffer,
                 * Nested commits always have zero deltas, so
                 * just reread the time stamp
                 */
-               *ts = rb_time_stamp(buffer);
-               next_page->page->time_stamp = *ts;
+               ts = rb_time_stamp(buffer);
+               next_page->page->time_stamp = ts;
        }
 
  out_again:
@@ -1929,12 +1988,21 @@ rb_move_tail(struct ring_buffer_per_cpu *cpu_buffer,
 
 static struct ring_buffer_event *
 __rb_reserve_next(struct ring_buffer_per_cpu *cpu_buffer,
-                 unsigned type, unsigned long length, u64 *ts)
+                 unsigned long length, u64 ts,
+                 u64 delta, int add_timestamp)
 {
        struct buffer_page *tail_page;
        struct ring_buffer_event *event;
        unsigned long tail, write;
 
+       /*
+        * If the time delta since the last event is too big to
+        * hold in the time field of the event, then we append a
+        * TIME EXTEND event ahead of the data event.
+        */
+       if (unlikely(add_timestamp))
+               length += RB_LEN_TIME_EXTEND;
+
        tail_page = cpu_buffer->tail_page;
        write = local_add_return(length, &tail_page->write);
 
@@ -1943,7 +2011,7 @@ __rb_reserve_next(struct ring_buffer_per_cpu *cpu_buffer,
        tail = write - length;
 
        /* See if we shot pass the end of this buffer page */
-       if (write > BUF_PAGE_SIZE)
+       if (unlikely(write > BUF_PAGE_SIZE))
                return rb_move_tail(cpu_buffer, length, tail,
                                    tail_page, ts);
 
@@ -1951,18 +2019,16 @@ __rb_reserve_next(struct ring_buffer_per_cpu *cpu_buffer,
 
        event = __rb_page_index(tail_page, tail);
        kmemcheck_annotate_bitfield(event, bitfield);
-       rb_update_event(event, type, length);
+       rb_update_event(cpu_buffer, event, length, add_timestamp, delta);
 
-       /* The passed in type is zero for DATA */
-       if (likely(!type))
-               local_inc(&tail_page->entries);
+       local_inc(&tail_page->entries);
 
        /*
         * If this is the first commit on the page, then update
         * its timestamp.
         */
        if (!tail)
-               tail_page->page->time_stamp = *ts;
+               tail_page->page->time_stamp = ts;
 
        return event;
 }
@@ -1977,7 +2043,7 @@ rb_try_to_discard(struct ring_buffer_per_cpu *cpu_buffer,
        unsigned long addr;
 
        new_index = rb_event_index(event);
-       old_index = new_index + rb_event_length(event);
+       old_index = new_index + rb_event_ts_length(event);
        addr = (unsigned long)event;
        addr &= PAGE_MASK;
 
@@ -2003,76 +2069,13 @@ rb_try_to_discard(struct ring_buffer_per_cpu *cpu_buffer,
        return 0;
 }
 
-static int
-rb_add_time_stamp(struct ring_buffer_per_cpu *cpu_buffer,
-                 u64 *ts, u64 *delta)
-{
-       struct ring_buffer_event *event;
-       int ret;
-
-       WARN_ONCE(*delta > (1ULL << 59),
-                 KERN_WARNING "Delta way too big! %llu ts=%llu write stamp = %llu\n",
-                 (unsigned long long)*delta,
-                 (unsigned long long)*ts,
-                 (unsigned long long)cpu_buffer->write_stamp);
-
-       /*
-        * The delta is too big, we to add a
-        * new timestamp.
-        */
-       event = __rb_reserve_next(cpu_buffer,
-                                 RINGBUF_TYPE_TIME_EXTEND,
-                                 RB_LEN_TIME_EXTEND,
-                                 ts);
-       if (!event)
-               return -EBUSY;
-
-       if (PTR_ERR(event) == -EAGAIN)
-               return -EAGAIN;
-
-       /* Only a commited time event can update the write stamp */
-       if (rb_event_is_commit(cpu_buffer, event)) {
-               /*
-                * If this is the first on the page, then it was
-                * updated with the page itself. Try to discard it
-                * and if we can't just make it zero.
-                */
-               if (rb_event_index(event)) {
-                       event->time_delta = *delta & TS_MASK;
-                       event->array[0] = *delta >> TS_SHIFT;
-               } else {
-                       /* try to discard, since we do not need this */
-                       if (!rb_try_to_discard(cpu_buffer, event)) {
-                               /* nope, just zero it */
-                               event->time_delta = 0;
-                               event->array[0] = 0;
-                       }
-               }
-               cpu_buffer->write_stamp = *ts;
-               /* let the caller know this was the commit */
-               ret = 1;
-       } else {
-               /* Try to discard the event */
-               if (!rb_try_to_discard(cpu_buffer, event)) {
-                       /* Darn, this is just wasted space */
-                       event->time_delta = 0;
-                       event->array[0] = 0;
-               }
-               ret = 0;
-       }
-
-       *delta = 0;
-
-       return ret;
-}
-
 static void rb_start_commit(struct ring_buffer_per_cpu *cpu_buffer)
 {
        local_inc(&cpu_buffer->committing);
        local_inc(&cpu_buffer->commits);
 }
 
-static void rb_end_commit(struct ring_buffer_per_cpu *cpu_buffer)
+static inline void rb_end_commit(struct ring_buffer_per_cpu *cpu_buffer)
 {
        unsigned long commits;
 
@@ -2110,9 +2113,10 @@ rb_reserve_next_event(struct ring_buffer *buffer,
                      unsigned long length)
 {
        struct ring_buffer_event *event;
-       u64 ts, delta = 0;
-       int commit = 0;
+       u64 ts, delta;
        int nr_loops = 0;
+       int add_timestamp;
+       u64 diff;
 
        rb_start_commit(cpu_buffer);
 
@@ -2133,6 +2137,9 @@ rb_reserve_next_event(struct ring_buffer *buffer,
 
        length = rb_calculate_event_length(length);
  again:
+       add_timestamp = 0;
+       delta = 0;
+
        /*
         * We allow for interrupts to reenter here and do a trace.
         * If one does, it will cause this original code to loop
@@ -2146,56 +2153,32 @@ rb_reserve_next_event(struct ring_buffer *buffer,
                goto out_fail;
 
        ts = rb_time_stamp(cpu_buffer->buffer);
+       diff = ts - cpu_buffer->write_stamp;
 
-       /*
-        * Only the first commit can update the timestamp.
-        * Yes there is a race here. If an interrupt comes in
-        * just after the conditional and it traces too, then it
-        * will also check the deltas. More than one timestamp may
-        * also be made. But only the entry that did the actual
-        * commit will be something other than zero.
-        */
-       if (likely(cpu_buffer->tail_page == cpu_buffer->commit_page &&
-                  rb_page_write(cpu_buffer->tail_page) ==
-                  rb_commit_index(cpu_buffer))) {
-               u64 diff;
-
-               diff = ts - cpu_buffer->write_stamp;
-
-               /* make sure this diff is calculated here */
-               barrier();
-
-               /* Did the write stamp get updated already? */
-               if (unlikely(ts < cpu_buffer->write_stamp))
-                       goto get_event;
+       /* make sure this diff is calculated here */
+       barrier();
 
+       /* Did the write stamp get updated already? */
+       if (likely(ts >= cpu_buffer->write_stamp)) {
                delta = diff;
                if (unlikely(test_time_stamp(delta))) {
-
-                       commit = rb_add_time_stamp(cpu_buffer, &ts, &delta);
-                       if (commit == -EBUSY)
-                               goto out_fail;
-
-                       if (commit == -EAGAIN)
-                               goto again;
-
-                       RB_WARN_ON(cpu_buffer, commit < 0);
+                       WARN_ONCE(delta > (1ULL << 59),
+                                 KERN_WARNING "Delta way too big! %llu ts=%llu write stamp = %llu\n",
+                                 (unsigned long long)delta,
+                                 (unsigned long long)ts,
+                                 (unsigned long long)cpu_buffer->write_stamp);
+                       add_timestamp = 1;
                }
        }
 
- get_event:
-       event = __rb_reserve_next(cpu_buffer, 0, length, &ts);
+       event = __rb_reserve_next(cpu_buffer, length, ts,
+                                 delta, add_timestamp);
        if (unlikely(PTR_ERR(event) == -EAGAIN))
                goto again;
 
        if (!event)
                goto out_fail;
 
-       if (!rb_event_is_commit(cpu_buffer, event))
-               delta = 0;
-
-       event->time_delta = delta;
-
        return event;
 
  out_fail:
@@ -2207,13 +2190,9 @@ rb_reserve_next_event(struct ring_buffer *buffer,
 
 #define TRACE_RECURSIVE_DEPTH 16
 
-static int trace_recursive_lock(void)
+/* Keep this code out of the fast path cache */
+static noinline void trace_recursive_fail(void)
 {
-       current->trace_recursion++;
-
-       if (likely(current->trace_recursion < TRACE_RECURSIVE_DEPTH))
-               return 0;
-
        /* Disable all tracing before we do anything else */
        tracing_off_permanent();
 
@@ -2225,10 +2204,21 @@ static int trace_recursive_lock(void)
                    in_nmi());
 
        WARN_ON_ONCE(1);
+}
+
+static inline int trace_recursive_lock(void)
+{
+       current->trace_recursion++;
+
+       if (likely(current->trace_recursion < TRACE_RECURSIVE_DEPTH))
+               return 0;
+
+       trace_recursive_fail();
+
        return -1;
 }
 
-static void trace_recursive_unlock(void)
+static inline void trace_recursive_unlock(void)
 {
        WARN_ON_ONCE(!current->trace_recursion);
 
@@ -2308,12 +2298,28 @@ static void
 rb_update_write_stamp(struct ring_buffer_per_cpu *cpu_buffer,
                      struct ring_buffer_event *event)
 {
+       u64 delta;
+
        /*
         * The event first in the commit queue updates the
         * time stamp.
         */
-       if (rb_event_is_commit(cpu_buffer, event))
-               cpu_buffer->write_stamp += event->time_delta;
+       if (rb_event_is_commit(cpu_buffer, event)) {
+               /*
+                * A commit event that is first on a page
+                * updates the write timestamp with the page stamp
+                */
+               if (!rb_event_index(event))
+                       cpu_buffer->write_stamp =
+                               cpu_buffer->commit_page->page->time_stamp;
+               else if (event->type_len == RINGBUF_TYPE_TIME_EXTEND) {
+                       delta = event->array[0];
+                       delta <<= TS_SHIFT;
+                       delta += event->time_delta;
+                       cpu_buffer->write_stamp += delta;
+               } else
+                       cpu_buffer->write_stamp += event->time_delta;
+       }
 }
 
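
The commit-side update above now has three cases: the first event on a page inherits the page timestamp, a time extend contributes its full reconstructed delta (the same TS_SHIFT split sketched earlier in this log), and an ordinary event just adds its small delta. A compact model of that branch structure, with illustrative names only:

    #include <stdint.h>
    #include <stdio.h>

    #define TS_SHIFT 27

    enum { TYPE_DATA, TYPE_TIME_EXTEND };

    struct toy_event { int type; int first_on_page;
                       uint32_t time_delta; uint32_t array0; };

    static void update_write_stamp(uint64_t *write_stamp, uint64_t page_stamp,
                                   const struct toy_event *ev)
    {
        if (ev->first_on_page)
            *write_stamp = page_stamp;     /* first commit on a page */
        else if (ev->type == TYPE_TIME_EXTEND)
            *write_stamp += ((uint64_t)ev->array0 << TS_SHIFT) + ev->time_delta;
        else
            *write_stamp += ev->time_delta;
    }

    int main(void)
    {
        uint64_t write_stamp = 1000;
        struct toy_event ext = { TYPE_TIME_EXTEND, 0, 5, 2 };  /* delta = (2<<27)+5 */

        update_write_stamp(&write_stamp, 0, &ext);
        printf("%llu\n", (unsigned long long)write_stamp);     /* 268436461 */
        return 0;
    }
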
 static void rb_commit(struct ring_buffer_per_cpu *cpu_buffer,
@@ -2353,6 +2359,9 @@ EXPORT_SYMBOL_GPL(ring_buffer_unlock_commit);
 
 static inline void rb_event_discard(struct ring_buffer_event *event)
 {
+       if (event->type_len == RINGBUF_TYPE_TIME_EXTEND)
+               event = skip_time_extend(event);
+
        /* array[0] holds the actual length for the discarded event */
        event->array[0] = rb_event_data_length(event) - RB_EVNT_HDR_SIZE;
        event->type_len = RINGBUF_TYPE_PADDING;
@@ -3049,12 +3058,12 @@ rb_buffer_peek(struct ring_buffer_per_cpu *cpu_buffer, u64 *ts,
 
  again:
        /*
-        * We repeat when a timestamp is encountered. It is possible
-        * to get multiple timestamps from an interrupt entering just
-        * as one timestamp is about to be written, or from discarded
-        * commits. The most that we can have is the number on a single page.
+        * We repeat when a time extend is encountered.
+        * Since the time extend is always attached to a data event,
+        * we should never loop more than once.
+        * (We never hit the following condition more than twice).
         */
-       if (RB_WARN_ON(cpu_buffer, ++nr_loops > RB_TIMESTAMPS_PER_PAGE))
+       if (RB_WARN_ON(cpu_buffer, ++nr_loops > 2))
                return NULL;
 
        reader = rb_get_reader_page(cpu_buffer);
@@ -3130,14 +3139,12 @@ rb_iter_peek(struct ring_buffer_iter *iter, u64 *ts)
                return NULL;
 
        /*
-        * We repeat when a timestamp is encountered.
-        * We can get multiple timestamps by nested interrupts or also
-        * if filtering is on (discarding commits). Since discarding
-        * commits can be frequent we can get a lot of timestamps.
-        * But we limit them by not adding timestamps if they begin
-        * at the start of a page.
+        * We repeat when a time extend is encountered.
+        * Since the time extend is always attached to a data event,
+        * we should never loop more than once.
+        * (We never hit the following condition more than twice).
         */
-       if (RB_WARN_ON(cpu_buffer, ++nr_loops > RB_TIMESTAMPS_PER_PAGE))
+       if (RB_WARN_ON(cpu_buffer, ++nr_loops > 2))
                return NULL;
 
        if (rb_per_cpu_empty(cpu_buffer))
@@ -3835,7 +3842,8 @@ int ring_buffer_read_page(struct ring_buffer *buffer,
                if (len > (commit - read))
                        len = (commit - read);
 
-               size = rb_event_length(event);
+               /* Always keep the time extend and data together */
+               size = rb_event_ts_length(event);
 
                if (len < size)
                        goto out_unlock;
@@ -3857,7 +3865,8 @@ int ring_buffer_read_page(struct ring_buffer *buffer,
                                break;
 
                        event = rb_reader_event(cpu_buffer);
-                       size = rb_event_length(event);
+                       /* Always keep the time extend and data together */
+                       size = rb_event_ts_length(event);
                } while (len > size);
 
                /* update bpage */
index 001bcd2..82d9b81 100644 (file)
@@ -3996,13 +3996,9 @@ static void tracing_init_debugfs_percpu(long cpu)
 {
        struct dentry *d_percpu = tracing_dentry_percpu();
        struct dentry *d_cpu;
-       /* strlen(cpu) + MAX(log10(cpu)) + '\0' */
-       char cpu_dir[7];
+       char cpu_dir[30]; /* 30 characters should be more than enough */
 
-       if (cpu > 999 || cpu < 0)
-               return;
-
-       sprintf(cpu_dir, "cpu%ld", cpu);
+       snprintf(cpu_dir, 30, "cpu%ld", cpu);
        d_cpu = debugfs_create_dir(cpu_dir, d_percpu);
        if (!d_cpu) {
                pr_warning("Could not create debugfs '%s' entry\n", cpu_dir);
index b8d2852..2dec9bc 100644 (file)
@@ -31,7 +31,6 @@
 #include <linux/perf_event.h>
 #include <linux/stringify.h>
 #include <linux/limits.h>
-#include <linux/uaccess.h>
 #include <asm/bitsperlong.h>
 
 #include "trace.h"
index 0a67e04..24dc60d 100644 (file)
@@ -63,12 +63,10 @@ void bacct_add_tsk(struct taskstats *stats, struct task_struct *tsk)
        stats->ac_ppid   = pid_alive(tsk) ?
                                rcu_dereference(tsk->real_parent)->tgid : 0;
        rcu_read_unlock();
-       stats->ac_utime  = cputime_to_msecs(tsk->utime) * USEC_PER_MSEC;
-       stats->ac_stime  = cputime_to_msecs(tsk->stime) * USEC_PER_MSEC;
-       stats->ac_utimescaled =
-               cputime_to_msecs(tsk->utimescaled) * USEC_PER_MSEC;
-       stats->ac_stimescaled =
-               cputime_to_msecs(tsk->stimescaled) * USEC_PER_MSEC;
+       stats->ac_utime = cputime_to_usecs(tsk->utime);
+       stats->ac_stime = cputime_to_usecs(tsk->stime);
+       stats->ac_utimescaled = cputime_to_usecs(tsk->utimescaled);
+       stats->ac_stimescaled = cputime_to_usecs(tsk->stimescaled);
        stats->ac_minflt = tsk->min_flt;
        stats->ac_majflt = tsk->maj_flt;
 
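
The taskstats change above avoids a round trip through milliseconds, which silently threw away sub-millisecond CPU time. A toy comparison of the two conversions (plain microsecond values here; the real cputime_t granularity is architecture specific):

    #include <stdio.h>

    #define USEC_PER_MSEC 1000ULL

    int main(void)
    {
        unsigned long long usecs = 1500;   /* 1.5 ms of accumulated CPU time */

        unsigned long long old_way = (usecs / 1000) * USEC_PER_MSEC; /* 1000 */
        unsigned long long new_way = usecs;                          /* 1500 */

        printf("old=%llu new=%llu\n", old_way, new_way);
        return 0;
    }
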
index 781e754..693394d 100644 (file)
 #include <linux/kgdb.h>
 #include <asm/tlbflush.h>
 
+
+#if defined(CONFIG_HIGHMEM) || defined(CONFIG_X86_32)
+DEFINE_PER_CPU(int, __kmap_atomic_idx);
+#endif
+
 /*
  * Virtual_count is not a pure "count".
  *  0 means that it is not mapped, and has not been mapped
@@ -43,7 +48,6 @@ unsigned long totalhigh_pages __read_mostly;
 EXPORT_SYMBOL(totalhigh_pages);
 
 
-DEFINE_PER_CPU(int, __kmap_atomic_idx);
 EXPORT_PER_CPU_SYMBOL(__kmap_atomic_idx);
 
 unsigned int nr_free_highpages (void)
index 9be3cf8..9a99cfa 100644 (file)
@@ -89,7 +89,10 @@ enum mem_cgroup_stat_index {
        MEM_CGROUP_STAT_PGPGIN_COUNT,   /* # of pages paged in */
        MEM_CGROUP_STAT_PGPGOUT_COUNT,  /* # of pages paged out */
        MEM_CGROUP_STAT_SWAPOUT, /* # of pages, swapped out */
-       MEM_CGROUP_EVENTS,      /* incremented at every  pagein/pageout */
+       MEM_CGROUP_STAT_DATA, /* end of data requires synchronization */
+       /* incremented at every  pagein/pageout */
+       MEM_CGROUP_EVENTS = MEM_CGROUP_STAT_DATA,
+       MEM_CGROUP_ON_MOVE,     /* someone is moving account between groups */
 
        MEM_CGROUP_STAT_NSTATS,
 };
@@ -254,6 +257,12 @@ struct mem_cgroup {
         * percpu counter.
         */
        struct mem_cgroup_stat_cpu *stat;
+       /*
+        * Used when a CPU is offlined or for other synchronizations;
+        * see mem_cgroup_read_stat().
+        */
+       struct mem_cgroup_stat_cpu nocpu_base;
+       spinlock_t pcp_counter_lock;
 };
 
 /* Stuffs for move charges at task migration. */
@@ -530,14 +539,40 @@ mem_cgroup_largest_soft_limit_node(struct mem_cgroup_tree_per_zone *mctz)
        return mz;
 }
 
+/*
+ * Implementation Note: reading percpu statistics for memcg.
+ *
+ * Both vmstat[] and percpu_counter use a threshold and periodic
+ * synchronization to implement a "quick" read. There is a trade-off between
+ * reading cost and precision of the value, so we may eventually implement
+ * a periodic synchronization of the counters in memcg as well.
+ *
+ * But this _read() function is currently used for the user interface. The
+ * user accounts memory usage by memory cgroup and _always_ requires an exact
+ * value. Even if we provided a quick-and-fuzzy read, we would still have to
+ * visit all online cpus and sum them up. So, for now, the extra
+ * synchronization is not implemented (it is only implemented for cpu hotplug).
+ *
+ * If kernel-internal users appear that can make do with an inexact value,
+ * and reading all cpu values becomes a performance bottleneck in some
+ * common workload, a threshold and synchronization scheme like vmstat[]
+ * should be implemented.
+ */
 static s64 mem_cgroup_read_stat(struct mem_cgroup *mem,
                enum mem_cgroup_stat_index idx)
 {
        int cpu;
        s64 val = 0;
 
-       for_each_possible_cpu(cpu)
+       get_online_cpus();
+       for_each_online_cpu(cpu)
                val += per_cpu(mem->stat->count[idx], cpu);
+#ifdef CONFIG_HOTPLUG_CPU
+       spin_lock(&mem->pcp_counter_lock);
+       val += mem->nocpu_base.count[idx];
+       spin_unlock(&mem->pcp_counter_lock);
+#endif
+       put_online_cpus();
        return val;
 }
 
@@ -659,40 +694,83 @@ static struct mem_cgroup *try_get_mem_cgroup_from_mm(struct mm_struct *mm)
        return mem;
 }
 
-/*
- * Call callback function against all cgroup under hierarchy tree.
- */
-static int mem_cgroup_walk_tree(struct mem_cgroup *root, void *data,
-                         int (*func)(struct mem_cgroup *, void *))
+/* The caller has to guarantee "mem" exists before calling this */
+static struct mem_cgroup *mem_cgroup_start_loop(struct mem_cgroup *mem)
 {
-       int found, ret, nextid;
        struct cgroup_subsys_state *css;
-       struct mem_cgroup *mem;
-
-       if (!root->use_hierarchy)
-               return (*func)(root, data);
+       int found;
 
-       nextid = 1;
-       do {
-               ret = 0;
+       if (!mem) /* ROOT cgroup has the smallest ID */
+               return root_mem_cgroup; /*css_put/get against root is ignored*/
+       if (!mem->use_hierarchy) {
+               if (css_tryget(&mem->css))
+                       return mem;
+               return NULL;
+       }
+       rcu_read_lock();
+       /*
+        * search for the memory cgroup which has the smallest ID under the
+        * given ROOT cgroup (ID >= 1).
+        */
+       css = css_get_next(&mem_cgroup_subsys, 1, &mem->css, &found);
+       if (css && css_tryget(css))
+               mem = container_of(css, struct mem_cgroup, css);
+       else
                mem = NULL;
+       rcu_read_unlock();
+       return mem;
+}
+
+static struct mem_cgroup *mem_cgroup_get_next(struct mem_cgroup *iter,
+                                       struct mem_cgroup *root,
+                                       bool cond)
+{
+       int nextid = css_id(&iter->css) + 1;
+       int found;
+       int hierarchy_used;
+       struct cgroup_subsys_state *css;
+
+       hierarchy_used = iter->use_hierarchy;
 
+       css_put(&iter->css);
+       /* If no ROOT, walk all, ignore hierarchy */
+       if (!cond || (root && !hierarchy_used))
+               return NULL;
+
+       if (!root)
+               root = root_mem_cgroup;
+
+       do {
+               iter = NULL;
                rcu_read_lock();
-               css = css_get_next(&mem_cgroup_subsys, nextid, &root->css,
-                                  &found);
+
+               css = css_get_next(&mem_cgroup_subsys, nextid,
+                               &root->css, &found);
                if (css && css_tryget(css))
-                       mem = container_of(css, struct mem_cgroup, css);
+                       iter = container_of(css, struct mem_cgroup, css);
                rcu_read_unlock();
-
-               if (mem) {
-                       ret = (*func)(mem, data);
-                       css_put(&mem->css);
-               }
+               /* If css is NULL, no more cgroups will be found */
                nextid = found + 1;
-       } while (!ret && css);
+       } while (css && !iter);
 
-       return ret;
+       return iter;
 }
+/*
+ * for_each_mem_cgroup_tree() visits all cgroups under a tree. Be careful:
+ * breaking out of the loop is not allowed because we hold a reference count
+ * on the current iterator. Instead, set "cond" to false and "continue" to
+ * exit the loop.
+ */
+#define for_each_mem_cgroup_tree_cond(iter, root, cond)        \
+       for (iter = mem_cgroup_start_loop(root);\
+            iter != NULL;\
+            iter = mem_cgroup_get_next(iter, root, cond))
+
+#define for_each_mem_cgroup_tree(iter, root) \
+       for_each_mem_cgroup_tree_cond(iter, root, true)
+
+#define for_each_mem_cgroup_all(iter) \
+       for_each_mem_cgroup_tree_cond(iter, NULL, true)
+
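
The reason the comment above forbids "break" is that the get-next step both drops the reference on the current iterator and evaluates cond, so bailing out of the body would leak a css reference. A toy refcounted list that mimics the idiom (nothing below is the memcg code):

    #include <stdio.h>

    struct node { int id; int refs; struct node *next; };

    static struct node *get_next(struct node *cur, int cond)
    {
        struct node *next = cur->next;

        cur->refs--;                   /* reference dropped here, even on exit */
        if (!cond || !next)
            return NULL;
        next->refs++;
        return next;
    }

    #define for_each_node_cond(iter, head, cond)            \
        for ((iter) = (head), (iter)->refs++;               \
             (iter) != NULL;                                \
             (iter) = get_next((iter), (cond)))

    int main(void)
    {
        struct node c = { 3, 0, NULL }, b = { 2, 0, &c }, a = { 1, 0, &b };
        struct node *iter;
        int cond = 1;

        for_each_node_cond(iter, &a, cond) {
            printf("visit %d\n", iter->id);
            if (iter->id == 2)
                cond = 0;              /* instead of 'break': let the step clean up */
        }
        printf("refs: %d %d %d\n", a.refs, b.refs, c.refs);   /* 0 0 0 */
        return 0;
    }
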
 
 static inline bool mem_cgroup_is_root(struct mem_cgroup *mem)
 {
@@ -1051,7 +1129,52 @@ static unsigned int get_swappiness(struct mem_cgroup *memcg)
        return swappiness;
 }
 
-/* A routine for testing mem is not under move_account */
+static void mem_cgroup_start_move(struct mem_cgroup *mem)
+{
+       int cpu;
+
+       get_online_cpus();
+       spin_lock(&mem->pcp_counter_lock);
+       for_each_online_cpu(cpu)
+               per_cpu(mem->stat->count[MEM_CGROUP_ON_MOVE], cpu) += 1;
+       mem->nocpu_base.count[MEM_CGROUP_ON_MOVE] += 1;
+       spin_unlock(&mem->pcp_counter_lock);
+       put_online_cpus();
+
+       synchronize_rcu();
+}
+
+static void mem_cgroup_end_move(struct mem_cgroup *mem)
+{
+       int cpu;
+
+       if (!mem)
+               return;
+       get_online_cpus();
+       spin_lock(&mem->pcp_counter_lock);
+       for_each_online_cpu(cpu)
+               per_cpu(mem->stat->count[MEM_CGROUP_ON_MOVE], cpu) -= 1;
+       mem->nocpu_base.count[MEM_CGROUP_ON_MOVE] -= 1;
+       spin_unlock(&mem->pcp_counter_lock);
+       put_online_cpus();
+}
+/*
+ * Two routines for checking whether "mem" is under move_account() or not.
+ *
+ * mem_cgroup_stealed() - checks whether a cgroup is mc.from. This is used
+ *                       to avoid races in accounting: if true,
+ *                       pc->mem_cgroup may be overwritten.
+ *
+ * mem_cgroup_under_move() - checks whether a cgroup is mc.from, mc.to, or
+ *                       under the hierarchy of moving cgroups. This is used
+ *                       for waiting under high memory pressure caused by "move".
+ */
+
+static bool mem_cgroup_stealed(struct mem_cgroup *mem)
+{
+       VM_BUG_ON(!rcu_read_lock_held());
+       return this_cpu_read(mem->stat->count[MEM_CGROUP_ON_MOVE]) > 0;
+}
 
 static bool mem_cgroup_under_move(struct mem_cgroup *mem)
 {
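
The two helpers above form the writer half of the move-accounting protocol: mem_cgroup_start_move() raises the MEM_CGROUP_ON_MOVE per-cpu counters and then calls synchronize_rcu(), so any statistics updater running inside rcu_read_lock() afterwards sees mem_cgroup_stealed() return true and falls back to lock_page_cgroup() (that reader side is mem_cgroup_update_file_stat(), further down). A condensed sketch of the intended pairing; the real call sites are the force_empty and can_attach hunks later in this patch:

    	/* sketch: bracket any section that rewrites pc->mem_cgroup */
    	mem_cgroup_start_move(from);	/* readers now observe ON_MOVE > 0 */

    	/* ... move page charges away from "from", rewriting
    	 * pc->mem_cgroup under lock_page_cgroup() ... */

    	mem_cgroup_end_move(from);	/* drop ON_MOVE back down */
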
@@ -1092,13 +1215,6 @@ static bool mem_cgroup_wait_acct_move(struct mem_cgroup *mem)
        return false;
 }
 
-static int mem_cgroup_count_children_cb(struct mem_cgroup *mem, void *data)
-{
-       int *val = data;
-       (*val)++;
-       return 0;
-}
-
 /**
  * mem_cgroup_print_oom_info: Called from OOM with tasklist_lock held in read mode.
  * @memcg: The memory cgroup that went over limit
@@ -1173,7 +1289,10 @@ done:
 static int mem_cgroup_count_children(struct mem_cgroup *mem)
 {
        int num = 0;
-       mem_cgroup_walk_tree(mem, &num, mem_cgroup_count_children_cb);
+       struct mem_cgroup *iter;
+
+       for_each_mem_cgroup_tree(iter, mem)
+               num++;
        return num;
 }
 
@@ -1322,49 +1441,39 @@ static int mem_cgroup_hierarchical_reclaim(struct mem_cgroup *root_mem,
        return total;
 }
 
-static int mem_cgroup_oom_lock_cb(struct mem_cgroup *mem, void *data)
-{
-       int *val = (int *)data;
-       int x;
-       /*
-        * Logically, we can stop scanning immediately when we find
-        * a memcg is already locked. But condidering unlock ops and
-        * creation/removal of memcg, scan-all is simple operation.
-        */
-       x = atomic_inc_return(&mem->oom_lock);
-       *val = max(x, *val);
-       return 0;
-}
 /*
  * Check OOM-Killer is already running under our hierarchy.
  * If someone is running, return false.
  */
 static bool mem_cgroup_oom_lock(struct mem_cgroup *mem)
 {
-       int lock_count = 0;
+       int x, lock_count = 0;
+       struct mem_cgroup *iter;
 
-       mem_cgroup_walk_tree(mem, &lock_count, mem_cgroup_oom_lock_cb);
+       for_each_mem_cgroup_tree(iter, mem) {
+               x = atomic_inc_return(&iter->oom_lock);
+               lock_count = max(x, lock_count);
+       }
 
        if (lock_count == 1)
                return true;
        return false;
 }
 
-static int mem_cgroup_oom_unlock_cb(struct mem_cgroup *mem, void *data)
+static int mem_cgroup_oom_unlock(struct mem_cgroup *mem)
 {
+       struct mem_cgroup *iter;
+
        /*
         * When a new child is created while the hierarchy is under oom,
         * mem_cgroup_oom_lock() may not be called. We have to use
         * atomic_add_unless() here.
         */
-       atomic_add_unless(&mem->oom_lock, -1, 0);
+       for_each_mem_cgroup_tree(iter, mem)
+               atomic_add_unless(&iter->oom_lock, -1, 0);
        return 0;
 }
 
-static void mem_cgroup_oom_unlock(struct mem_cgroup *mem)
-{
-       mem_cgroup_walk_tree(mem, NULL, mem_cgroup_oom_unlock_cb);
-}
 
 static DEFINE_MUTEX(memcg_oom_mutex);
 static DECLARE_WAIT_QUEUE_HEAD(memcg_oom_waitq);
@@ -1462,34 +1571,73 @@ bool mem_cgroup_handle_oom(struct mem_cgroup *mem, gfp_t mask)
 /*
  * Currently used to update mapped file statistics, but the routine can be
  * generalized to update other statistics as well.
+ *
+ * Notes: Race condition
+ *
+ * We usually use page_cgroup_lock() for accessing page_cgroup members, but
+ * it tends to be costly. Considering some conditions, we don't need to do
+ * so _always_.
+ *
+ * Considering "charge", lock_page_cgroup() is not required because all
+ * file-stat operations happen after a page is attached to the radix-tree.
+ * There is no race with "charge".
+ *
+ * Considering "uncharge", we know that memcg doesn't clear pc->mem_cgroup
+ * at "uncharge" intentionally. So we always see a valid pc->mem_cgroup even
+ * if there is a race with "uncharge". The statistics themselves are handled
+ * properly by flags.
+ *
+ * Considering "move", this is the only case where we see a race. To keep the
+ * race window small, we check the MEM_CGROUP_ON_MOVE percpu value to detect
+ * whether a race is possible. If it is, we take a lock.
  */
-void mem_cgroup_update_file_mapped(struct page *page, int val)
+
+static void mem_cgroup_update_file_stat(struct page *page, int idx, int val)
 {
        struct mem_cgroup *mem;
-       struct page_cgroup *pc;
+       struct page_cgroup *pc = lookup_page_cgroup(page);
+       bool need_unlock = false;
 
-       pc = lookup_page_cgroup(page);
        if (unlikely(!pc))
                return;
 
-       lock_page_cgroup(pc);
+       rcu_read_lock();
        mem = pc->mem_cgroup;
-       if (!mem || !PageCgroupUsed(pc))
-               goto done;
+       if (unlikely(!mem || !PageCgroupUsed(pc)))
+               goto out;
+       /* is pc->mem_cgroup unstable? */
+       if (unlikely(mem_cgroup_stealed(mem))) {
+               /* take a lock to stabilize pc->mem_cgroup */
+               lock_page_cgroup(pc);
+               need_unlock = true;
+               mem = pc->mem_cgroup;
+               if (!mem || !PageCgroupUsed(pc))
+                       goto out;
+       }
 
-       /*
-        * Preemption is already disabled. We can use __this_cpu_xxx
-        */
-       if (val > 0) {
-               __this_cpu_inc(mem->stat->count[MEM_CGROUP_STAT_FILE_MAPPED]);
-               SetPageCgroupFileMapped(pc);
-       } else {
-               __this_cpu_dec(mem->stat->count[MEM_CGROUP_STAT_FILE_MAPPED]);
-               ClearPageCgroupFileMapped(pc);
+       this_cpu_add(mem->stat->count[idx], val);
+
+       switch (idx) {
+       case MEM_CGROUP_STAT_FILE_MAPPED:
+               if (val > 0)
+                       SetPageCgroupFileMapped(pc);
+               else if (!page_mapped(page))
+                       ClearPageCgroupFileMapped(pc);
+               break;
+       default:
+               BUG();
        }
 
-done:
-       unlock_page_cgroup(pc);
+out:
+       if (unlikely(need_unlock))
+               unlock_page_cgroup(pc);
+       rcu_read_unlock();
+       return;
+}
+
+void mem_cgroup_update_file_mapped(struct page *page, int val)
+{
+       mem_cgroup_update_file_stat(page, MEM_CGROUP_STAT_FILE_MAPPED, val);
 }
 
 /*
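
mem_cgroup_update_file_mapped() is now a thin wrapper, so adding another per-memcg file statistic only needs a new index plus a case in the switch above. A hypothetical sketch (MEM_CGROUP_STAT_FILE_DIRTY does not exist in this patch; it is shown only to illustrate the intended generalization):

    /* hypothetical: a future dirty-page statistic would plug in like this,
     * together with a MEM_CGROUP_STAT_FILE_DIRTY case in the switch above */
    void mem_cgroup_update_file_dirty(struct page *page, int val)
    {
    	mem_cgroup_update_file_stat(page, MEM_CGROUP_STAT_FILE_DIRTY, val);
    }
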
@@ -1605,15 +1753,55 @@ static void drain_all_stock_sync(void)
        atomic_dec(&memcg_drain_count);
 }
 
-static int __cpuinit memcg_stock_cpu_callback(struct notifier_block *nb,
+/*
+ * This function drains the percpu counter values of a dead CPU into
+ * nocpu_base so they are not lost. Note that this function can be preempted.
+ */
+static void mem_cgroup_drain_pcp_counter(struct mem_cgroup *mem, int cpu)
+{
+       int i;
+
+       spin_lock(&mem->pcp_counter_lock);
+       for (i = 0; i < MEM_CGROUP_STAT_DATA; i++) {
+               s64 x = per_cpu(mem->stat->count[i], cpu);
+
+               per_cpu(mem->stat->count[i], cpu) = 0;
+               mem->nocpu_base.count[i] += x;
+       }
+       /* need to clear the ON_MOVE value; it works as a kind of lock. */
+       per_cpu(mem->stat->count[MEM_CGROUP_ON_MOVE], cpu) = 0;
+       spin_unlock(&mem->pcp_counter_lock);
+}
+
+static void synchronize_mem_cgroup_on_move(struct mem_cgroup *mem, int cpu)
+{
+       int idx = MEM_CGROUP_ON_MOVE;
+
+       spin_lock(&mem->pcp_counter_lock);
+       per_cpu(mem->stat->count[idx], cpu) = mem->nocpu_base.count[idx];
+       spin_unlock(&mem->pcp_counter_lock);
+}
+
+static int __cpuinit memcg_cpu_hotplug_callback(struct notifier_block *nb,
                                        unsigned long action,
                                        void *hcpu)
 {
        int cpu = (unsigned long)hcpu;
        struct memcg_stock_pcp *stock;
+       struct mem_cgroup *iter;
+
+       if (action == CPU_ONLINE) {
+               for_each_mem_cgroup_all(iter)
+                       synchronize_mem_cgroup_on_move(iter, cpu);
+               return NOTIFY_OK;
+       }
 
-       if (action != CPU_DEAD)
+       if (action != CPU_DEAD && action != CPU_DEAD_FROZEN)
                return NOTIFY_OK;
+
+       for_each_mem_cgroup_all(iter)
+               mem_cgroup_drain_pcp_counter(iter, cpu);
+
        stock = &per_cpu(memcg_stock, cpu);
        drain_stock(stock);
        return NOTIFY_OK;
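
The CPU_DEAD path parks the dead CPU's counters in nocpu_base rather than discarding them, which only works if readers fold nocpu_base back into their totals. A sketch of such a reader, under the assumption that this is how mem_cgroup_read_stat() is meant to behave after this series; the helper name memcg_stat_total() is made up for illustration:

    /* sketch only: sum a statistic without losing values parked in
     * nocpu_base by the hotplug callback above */
    static s64 memcg_stat_total(struct mem_cgroup *mem,
    			    enum mem_cgroup_stat_index idx)
    {
    	s64 val = 0;
    	int cpu;

    	get_online_cpus();
    	for_each_online_cpu(cpu)
    		val += per_cpu(mem->stat->count[idx], cpu);
    #ifdef CONFIG_HOTPLUG_CPU
    	spin_lock(&mem->pcp_counter_lock);
    	val += mem->nocpu_base.count[idx];
    	spin_unlock(&mem->pcp_counter_lock);
    #endif
    	put_online_cpus();
    	return val;
    }
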
@@ -3038,6 +3226,7 @@ move_account:
                lru_add_drain_all();
                drain_all_stock_sync();
                ret = 0;
+               mem_cgroup_start_move(mem);
                for_each_node_state(node, N_HIGH_MEMORY) {
                        for (zid = 0; !ret && zid < MAX_NR_ZONES; zid++) {
                                enum lru_list l;
@@ -3051,6 +3240,7 @@ move_account:
                        if (ret)
                                break;
                }
+               mem_cgroup_end_move(mem);
                memcg_oom_recover(mem);
                /* it seems parent cgroup doesn't have enough mem */
                if (ret == -ENOMEM)
@@ -3137,33 +3327,25 @@ static int mem_cgroup_hierarchy_write(struct cgroup *cont, struct cftype *cft,
        return retval;
 }
 
-struct mem_cgroup_idx_data {
-       s64 val;
-       enum mem_cgroup_stat_index idx;
-};
 
-static int
-mem_cgroup_get_idx_stat(struct mem_cgroup *mem, void *data)
+static u64 mem_cgroup_get_recursive_idx_stat(struct mem_cgroup *mem,
+                               enum mem_cgroup_stat_index idx)
 {
-       struct mem_cgroup_idx_data *d = data;
-       d->val += mem_cgroup_read_stat(mem, d->idx);
-       return 0;
-}
+       struct mem_cgroup *iter;
+       s64 val = 0;
 
-static void
-mem_cgroup_get_recursive_idx_stat(struct mem_cgroup *mem,
-                               enum mem_cgroup_stat_index idx, s64 *val)
-{
-       struct mem_cgroup_idx_data d;
-       d.idx = idx;
-       d.val = 0;
-       mem_cgroup_walk_tree(mem, &d, mem_cgroup_get_idx_stat);
-       *val = d.val;
+       /* each per-cpu value can be negative, so use s64 */
+       for_each_mem_cgroup_tree(iter, mem)
+               val += mem_cgroup_read_stat(iter, idx);
+
+       if (val < 0) /* race ? */
+               val = 0;
+       return val;
 }
 
 static inline u64 mem_cgroup_usage(struct mem_cgroup *mem, bool swap)
 {
-       u64 idx_val, val;
+       u64 val;
 
        if (!mem_cgroup_is_root(mem)) {
                if (!swap)
@@ -3172,16 +3354,12 @@ static inline u64 mem_cgroup_usage(struct mem_cgroup *mem, bool swap)
                        return res_counter_read_u64(&mem->memsw, RES_USAGE);
        }
 
-       mem_cgroup_get_recursive_idx_stat(mem, MEM_CGROUP_STAT_CACHE, &idx_val);
-       val = idx_val;
-       mem_cgroup_get_recursive_idx_stat(mem, MEM_CGROUP_STAT_RSS, &idx_val);
-       val += idx_val;
+       val = mem_cgroup_get_recursive_idx_stat(mem, MEM_CGROUP_STAT_CACHE);
+       val += mem_cgroup_get_recursive_idx_stat(mem, MEM_CGROUP_STAT_RSS);
 
-       if (swap) {
-               mem_cgroup_get_recursive_idx_stat(mem,
-                               MEM_CGROUP_STAT_SWAPOUT, &idx_val);
-               val += idx_val;
-       }
+       if (swap)
+               val += mem_cgroup_get_recursive_idx_stat(mem,
+                               MEM_CGROUP_STAT_SWAPOUT);
 
        return val << PAGE_SHIFT;
 }
@@ -3389,9 +3567,9 @@ struct {
 };
 
 
-static int mem_cgroup_get_local_stat(struct mem_cgroup *mem, void *data)
+static void
+mem_cgroup_get_local_stat(struct mem_cgroup *mem, struct mcs_total_stat *s)
 {
-       struct mcs_total_stat *s = data;
        s64 val;
 
        /* per cpu stat */
@@ -3421,13 +3599,15 @@ static int mem_cgroup_get_local_stat(struct mem_cgroup *mem, void *data)
        s->stat[MCS_ACTIVE_FILE] += val * PAGE_SIZE;
        val = mem_cgroup_get_local_zonestat(mem, LRU_UNEVICTABLE);
        s->stat[MCS_UNEVICTABLE] += val * PAGE_SIZE;
-       return 0;
 }
 
 static void
 mem_cgroup_get_total_stat(struct mem_cgroup *mem, struct mcs_total_stat *s)
 {
-       mem_cgroup_walk_tree(mem, s, mem_cgroup_get_local_stat);
+       struct mem_cgroup *iter;
+
+       for_each_mem_cgroup_tree(iter, mem)
+               mem_cgroup_get_local_stat(iter, s);
 }
 
 static int mem_control_stat_show(struct cgroup *cont, struct cftype *cft,
@@ -3604,7 +3784,7 @@ static int compare_thresholds(const void *a, const void *b)
        return _a->threshold - _b->threshold;
 }
 
-static int mem_cgroup_oom_notify_cb(struct mem_cgroup *mem, void *data)
+static int mem_cgroup_oom_notify_cb(struct mem_cgroup *mem)
 {
        struct mem_cgroup_eventfd_list *ev;
 
@@ -3615,7 +3795,10 @@ static int mem_cgroup_oom_notify_cb(struct mem_cgroup *mem, void *data)
 
 static void mem_cgroup_oom_notify(struct mem_cgroup *mem)
 {
-       mem_cgroup_walk_tree(mem, NULL, mem_cgroup_oom_notify_cb);
+       struct mem_cgroup *iter;
+
+       for_each_mem_cgroup_tree(iter, mem)
+               mem_cgroup_oom_notify_cb(iter);
 }
 
 static int mem_cgroup_usage_register_event(struct cgroup *cgrp,
@@ -4032,6 +4215,7 @@ static struct mem_cgroup *mem_cgroup_alloc(void)
                        vfree(mem);
                mem = NULL;
        }
+       if (mem)
+               spin_lock_init(&mem->pcp_counter_lock);
        return mem;
 }
 
@@ -4158,7 +4342,7 @@ mem_cgroup_create(struct cgroup_subsys *ss, struct cgroup *cont)
                                                &per_cpu(memcg_stock, cpu);
                        INIT_WORK(&stock->work, drain_local_stock);
                }
-               hotcpu_notifier(memcg_stock_cpu_callback, 0);
+               hotcpu_notifier(memcg_cpu_hotplug_callback, 0);
        } else {
                parent = mem_cgroup_from_cont(cont->parent);
                mem->use_hierarchy = parent->use_hierarchy;
@@ -4513,6 +4697,7 @@ static void mem_cgroup_clear_mc(void)
        mc.to = NULL;
        mc.moving_task = NULL;
        spin_unlock(&mc.lock);
+       mem_cgroup_end_move(from);
        memcg_oom_recover(from);
        memcg_oom_recover(to);
        wake_up_all(&mc.waitq);
@@ -4543,6 +4728,7 @@ static int mem_cgroup_can_attach(struct cgroup_subsys *ss,
                        VM_BUG_ON(mc.moved_charge);
                        VM_BUG_ON(mc.moved_swap);
                        VM_BUG_ON(mc.moving_task);
+                       mem_cgroup_start_move(from);
                        spin_lock(&mc.lock);
                        mc.from = from;
                        mc.to = mem;
index 3ce7bc3..3f48542 100644 (file)
--- a/mm/swap.c
+++ b/mm/swap.c
@@ -378,6 +378,7 @@ void release_pages(struct page **pages, int nr, int cold)
 
        pagevec_free(&pages_to_free);
 }
+EXPORT_SYMBOL(release_pages);
 
 /*
  * The pages which we're about to release may be in the deferred lru-addition
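
Exporting release_pages() lets modular code return a whole batch of page references in one call instead of looping over put_page(), which also batches the LRU/pagevec work. A minimal, hypothetical module-side sketch (assuming the declaration lives in <linux/pagemap.h> in this tree):

    #include <linux/mm.h>
    #include <linux/pagemap.h>

    /* hypothetical helper: drop a batch of pages pinned earlier, e.g. via
     * get_user_pages(); the third argument is the "cold" pagevec hint */
    static void drop_pinned_pages(struct page **pages, int nr)
    {
    	release_pages(pages, nr, 0);
    }
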
index 941f2a3..c1df2da 100644 (file)
--- a/net/802/garp.c
+++ b/net/802/garp.c
@@ -346,8 +346,8 @@ int garp_request_join(const struct net_device *dev,
                      const struct garp_application *appl,
                      const void *data, u8 len, u8 type)
 {
-       struct garp_port *port = dev->garp_port;
-       struct garp_applicant *app = port->applicants[appl->type];
+       struct garp_port *port = rtnl_dereference(dev->garp_port);
+       struct garp_applicant *app = rtnl_dereference(port->applicants[appl->type]);
        struct garp_attr *attr;
 
        spin_lock_bh(&app->lock);
@@ -366,8 +366,8 @@ void garp_request_leave(const struct net_device *dev,
                        const struct garp_application *appl,
                        const void *data, u8 len, u8 type)
 {
-       struct garp_port *port = dev->garp_port;
-       struct garp_applicant *app = port->applicants[appl->type];
+       struct garp_port *port = rtnl_dereference(dev->garp_port);
+       struct garp_applicant *app = rtnl_dereference(port->applicants[appl->type]);
        struct garp_attr *attr;
 
        spin_lock_bh(&app->lock);
@@ -546,11 +546,11 @@ static int garp_init_port(struct net_device *dev)
 
 static void garp_release_port(struct net_device *dev)
 {
-       struct garp_port *port = dev->garp_port;
+       struct garp_port *port = rtnl_dereference(dev->garp_port);
        unsigned int i;
 
        for (i = 0; i <= GARP_APPLICATION_MAX; i++) {
-               if (port->applicants[i])
+               if (rtnl_dereference(port->applicants[i]))
                        return;
        }
        rcu_assign_pointer(dev->garp_port, NULL);
@@ -565,7 +565,7 @@ int garp_init_applicant(struct net_device *dev, struct garp_application *appl)
 
        ASSERT_RTNL();
 
-       if (!dev->garp_port) {
+       if (!rtnl_dereference(dev->garp_port)) {
                err = garp_init_port(dev);
                if (err < 0)
                        goto err1;
@@ -601,8 +601,8 @@ EXPORT_SYMBOL_GPL(garp_init_applicant);
 
 void garp_uninit_applicant(struct net_device *dev, struct garp_application *appl)
 {
-       struct garp_port *port = dev->garp_port;
-       struct garp_applicant *app = port->applicants[appl->type];
+       struct garp_port *port = rtnl_dereference(dev->garp_port);
+       struct garp_applicant *app = rtnl_dereference(port->applicants[appl->type]);
 
        ASSERT_RTNL();
 
index 53c8f77..978c30b 100644 (file)
--- a/net/802/stp.c
+++ b/net/802/stp.c
@@ -21,8 +21,8 @@
 #define GARP_ADDR_MAX  0x2F
 #define GARP_ADDR_RANGE        (GARP_ADDR_MAX - GARP_ADDR_MIN)
 
-static const struct stp_proto *garp_protos[GARP_ADDR_RANGE + 1] __read_mostly;
-static const struct stp_proto *stp_proto __read_mostly;
+static const struct stp_proto __rcu *garp_protos[GARP_ADDR_RANGE + 1] __read_mostly;
+static const struct stp_proto __rcu *stp_proto __read_mostly;
 
 static struct llc_sap *sap __read_mostly;
 static unsigned int sap_registered;
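
The garp and stp changes above, and most of the networking hunks that follow, are sparse __rcu annotation work: an RCU-protected pointer is declared __rcu, lockless readers use rcu_dereference() (or rtnl_dereference()/rcu_dereference_protected() when the update-side lock is held instead), and writers publish with rcu_assign_pointer(). A self-contained sketch of the pattern with generic names; none of these identifiers come from the patch itself:

    #include <linux/rcupdate.h>
    #include <linux/spinlock.h>
    #include <linux/slab.h>

    struct cfg {
    	int value;
    };

    static struct cfg __rcu *cur_cfg;	/* __rcu: checked by sparse */
    static DEFINE_SPINLOCK(cfg_lock);	/* update-side lock */

    /* lockless reader */
    static int cfg_read_value(void)
    {
    	struct cfg *c;
    	int val = 0;

    	rcu_read_lock();
    	c = rcu_dereference(cur_cfg);
    	if (c)
    		val = c->value;
    	rcu_read_unlock();
    	return val;
    }

    /* writer: replace the config and free the old one after a grace period */
    static void cfg_replace(struct cfg *newc)
    {
    	struct cfg *old;

    	spin_lock(&cfg_lock);
    	old = rcu_dereference_protected(cur_cfg, lockdep_is_held(&cfg_lock));
    	rcu_assign_pointer(cur_cfg, newc);
    	spin_unlock(&cfg_lock);

    	if (old) {
    		synchronize_rcu();	/* wait out readers still using "old" */
    		kfree(old);
    	}
    }
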
index 05b867e..52077ca 100644 (file)
--- a/net/8021q/vlan.c
+++ b/net/8021q/vlan.c
@@ -112,7 +112,7 @@ void unregister_vlan_dev(struct net_device *dev, struct list_head *head)
 
        ASSERT_RTNL();
 
-       grp = real_dev->vlgrp;
+       grp = rtnl_dereference(real_dev->vlgrp);
        BUG_ON(!grp);
 
        /* Take it out of our own structures, but be sure to interlock with
@@ -177,7 +177,7 @@ int register_vlan_dev(struct net_device *dev)
        struct vlan_group *grp, *ngrp = NULL;
        int err;
 
-       grp = real_dev->vlgrp;
+       grp = rtnl_dereference(real_dev->vlgrp);
        if (!grp) {
                ngrp = grp = vlan_group_alloc(real_dev);
                if (!grp)
@@ -385,7 +385,7 @@ static int vlan_device_event(struct notifier_block *unused, unsigned long event,
                dev->netdev_ops->ndo_vlan_rx_add_vid(dev, 0);
        }
 
-       grp = dev->vlgrp;
+       grp = rtnl_dereference(dev->vlgrp);
        if (!grp)
                goto out;
 
index 78b5a89..35dfb83 100644 (file)
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -1685,10 +1685,10 @@ EXPORT_SYMBOL(netif_device_attach);
 
 static bool can_checksum_protocol(unsigned long features, __be16 protocol)
 {
-       return ((features & NETIF_F_GEN_CSUM) ||
-               ((features & NETIF_F_IP_CSUM) &&
+       return ((features & NETIF_F_NO_CSUM) ||
+               ((features & NETIF_F_V4_CSUM) &&
                 protocol == htons(ETH_P_IP)) ||
-               ((features & NETIF_F_IPV6_CSUM) &&
+               ((features & NETIF_F_V6_CSUM) &&
                 protocol == htons(ETH_P_IPV6)) ||
                ((features & NETIF_F_FCOE_CRC) &&
                 protocol == htons(ETH_P_FCOE)));
@@ -1696,22 +1696,18 @@ static bool can_checksum_protocol(unsigned long features, __be16 protocol)
 
 static bool dev_can_checksum(struct net_device *dev, struct sk_buff *skb)
 {
+       __be16 protocol = skb->protocol;
        int features = dev->features;
 
-       if (vlan_tx_tag_present(skb))
+       if (vlan_tx_tag_present(skb)) {
                features &= dev->vlan_features;
-
-       if (can_checksum_protocol(features, skb->protocol))
-               return true;
-
-       if (skb->protocol == htons(ETH_P_8021Q)) {
+       } else if (protocol == htons(ETH_P_8021Q)) {
                struct vlan_ethhdr *veh = (struct vlan_ethhdr *)skb->data;
-               if (can_checksum_protocol(dev->features & dev->vlan_features,
-                                         veh->h_vlan_encapsulated_proto))
-                       return true;
+               protocol = veh->h_vlan_encapsulated_proto;
+               features &= dev->vlan_features;
        }
 
-       return false;
+       return can_checksum_protocol(features, protocol);
 }
 
 /**
@@ -2213,7 +2209,7 @@ static inline int __dev_xmit_skb(struct sk_buff *skb, struct Qdisc *q,
 }
 
 static DEFINE_PER_CPU(int, xmit_recursion);
-#define RECURSION_LIMIT 3
+#define RECURSION_LIMIT 10
 
 /**
  *     dev_queue_xmit - transmit a buffer
@@ -2413,7 +2409,7 @@ EXPORT_SYMBOL(__skb_get_rxhash);
 #ifdef CONFIG_RPS
 
 /* One global table that all flow-based protocols share. */
-struct rps_sock_flow_table *rps_sock_flow_table __read_mostly;
+struct rps_sock_flow_table __rcu *rps_sock_flow_table __read_mostly;
 EXPORT_SYMBOL(rps_sock_flow_table);
 
 /*
@@ -2425,7 +2421,7 @@ static int get_rps_cpu(struct net_device *dev, struct sk_buff *skb,
                       struct rps_dev_flow **rflowp)
 {
        struct netdev_rx_queue *rxqueue;
-       struct rps_map *map = NULL;
+       struct rps_map *map;
        struct rps_dev_flow_table *flow_table;
        struct rps_sock_flow_table *sock_flow_table;
        int cpu = -1;
@@ -2444,15 +2440,15 @@ static int get_rps_cpu(struct net_device *dev, struct sk_buff *skb,
        } else
                rxqueue = dev->_rx;
 
-       if (rxqueue->rps_map) {
-               map = rcu_dereference(rxqueue->rps_map);
-               if (map && map->len == 1) {
+       map = rcu_dereference(rxqueue->rps_map);
+       if (map) {
+               if (map->len == 1) {
                        tcpu = map->cpus[0];
                        if (cpu_online(tcpu))
                                cpu = tcpu;
                        goto done;
                }
-       } else if (!rxqueue->rps_flow_table) {
+       } else if (!rcu_dereference_raw(rxqueue->rps_flow_table)) {
                goto done;
        }
 
@@ -5416,7 +5412,7 @@ void netdev_run_todo(void)
                /* paranoia */
                BUG_ON(netdev_refcnt_read(dev));
                WARN_ON(rcu_dereference_raw(dev->ip_ptr));
-               WARN_ON(dev->ip6_ptr);
+               WARN_ON(rcu_dereference_raw(dev->ip6_ptr));
                WARN_ON(dev->dn_ptr);
 
                if (dev->destructor)
index 1bc3f25..82a4369 100644 (file)
--- a/net/core/fib_rules.c
+++ b/net/core/fib_rules.c
@@ -351,12 +351,12 @@ static int fib_nl_newrule(struct sk_buff *skb, struct nlmsghdr* nlh, void *arg)
 
                list_for_each_entry(r, &ops->rules_list, list) {
                        if (r->pref == rule->target) {
-                               rule->ctarget = r;
+                               RCU_INIT_POINTER(rule->ctarget, r);
                                break;
                        }
                }
 
-               if (rule->ctarget == NULL)
+               if (rcu_dereference_protected(rule->ctarget, 1) == NULL)
                        unresolved = 1;
        } else if (rule->action == FR_ACT_GOTO)
                goto errout_free;
@@ -373,6 +373,11 @@ static int fib_nl_newrule(struct sk_buff *skb, struct nlmsghdr* nlh, void *arg)
 
        fib_rule_get(rule);
 
+       if (last)
+               list_add_rcu(&rule->list, &last->list);
+       else
+               list_add_rcu(&rule->list, &ops->rules_list);
+
        if (ops->unresolved_rules) {
                /*
                 * There are unresolved goto rules in the list, check if
@@ -381,7 +386,7 @@ static int fib_nl_newrule(struct sk_buff *skb, struct nlmsghdr* nlh, void *arg)
                list_for_each_entry(r, &ops->rules_list, list) {
                        if (r->action == FR_ACT_GOTO &&
                            r->target == rule->pref) {
-                               BUG_ON(r->ctarget != NULL);
+                               BUG_ON(rtnl_dereference(r->ctarget) != NULL);
                                rcu_assign_pointer(r->ctarget, rule);
                                if (--ops->unresolved_rules == 0)
                                        break;
@@ -395,11 +400,6 @@ static int fib_nl_newrule(struct sk_buff *skb, struct nlmsghdr* nlh, void *arg)
        if (unresolved)
                ops->unresolved_rules++;
 
-       if (last)
-               list_add_rcu(&rule->list, &last->list);
-       else
-               list_add_rcu(&rule->list, &ops->rules_list);
-
        notify_rule_change(RTM_NEWRULE, rule, ops, nlh, NETLINK_CB(skb).pid);
        flush_route_cache(ops);
        rules_ops_put(ops);
@@ -487,7 +487,7 @@ static int fib_nl_delrule(struct sk_buff *skb, struct nlmsghdr* nlh, void *arg)
                 */
                if (ops->nr_goto_rules > 0) {
                        list_for_each_entry(tmp, &ops->rules_list, list) {
-                               if (tmp->ctarget == rule) {
+                               if (rtnl_dereference(tmp->ctarget) == rule) {
                                        rcu_assign_pointer(tmp->ctarget, NULL);
                                        ops->unresolved_rules++;
                                }
@@ -545,7 +545,8 @@ static int fib_nl_fill_rule(struct sk_buff *skb, struct fib_rule *rule,
        frh->action = rule->action;
        frh->flags = rule->flags;
 
-       if (rule->action == FR_ACT_GOTO && rule->ctarget == NULL)
+       if (rule->action == FR_ACT_GOTO &&
+           rcu_dereference_raw(rule->ctarget) == NULL)
                frh->flags |= FIB_RULE_UNRESOLVED;
 
        if (rule->iifname[0]) {
index 7adf503..7beaec3 100644 (file)
--- a/net/core/filter.c
+++ b/net/core/filter.c
@@ -89,8 +89,8 @@ int sk_filter(struct sock *sk, struct sk_buff *skb)
        rcu_read_lock_bh();
        filter = rcu_dereference_bh(sk->sk_filter);
        if (filter) {
-               unsigned int pkt_len = sk_run_filter(skb, filter->insns,
-                               filter->len);
+               unsigned int pkt_len = sk_run_filter(skb, filter->insns, filter->len);
+
                err = pkt_len ? pskb_trim(skb, pkt_len) : -EPERM;
        }
        rcu_read_unlock_bh();
index b143173..a5ff5a8 100644 (file)
--- a/net/core/net-sysfs.c
+++ b/net/core/net-sysfs.c
@@ -598,7 +598,8 @@ static ssize_t store_rps_map(struct netdev_rx_queue *queue,
        }
 
        spin_lock(&rps_map_lock);
-       old_map = queue->rps_map;
+       old_map = rcu_dereference_protected(queue->rps_map,
+                                           lockdep_is_held(&rps_map_lock));
        rcu_assign_pointer(queue->rps_map, map);
        spin_unlock(&rps_map_lock);
 
@@ -677,7 +678,8 @@ static ssize_t store_rps_dev_flow_table_cnt(struct netdev_rx_queue *queue,
                table = NULL;
 
        spin_lock(&rps_dev_flow_lock);
-       old_table = queue->rps_flow_table;
+       old_table = rcu_dereference_protected(queue->rps_flow_table,
+                                             lockdep_is_held(&rps_dev_flow_lock));
        rcu_assign_pointer(queue->rps_flow_table, table);
        spin_unlock(&rps_dev_flow_lock);
 
@@ -705,13 +707,17 @@ static void rx_queue_release(struct kobject *kobj)
 {
        struct netdev_rx_queue *queue = to_rx_queue(kobj);
        struct netdev_rx_queue *first = queue->first;
+       struct rps_map *map;
+       struct rps_dev_flow_table *flow_table;
 
-       if (queue->rps_map)
-               call_rcu(&queue->rps_map->rcu, rps_map_release);
 
-       if (queue->rps_flow_table)
-               call_rcu(&queue->rps_flow_table->rcu,
-                   rps_dev_flow_table_release);
+       map = rcu_dereference_raw(queue->rps_map);
+       if (map)
+               call_rcu(&map->rcu, rps_map_release);
+
+       flow_table = rcu_dereference_raw(queue->rps_flow_table);
+       if (flow_table)
+               call_rcu(&flow_table->rcu, rps_dev_flow_table_release);
 
        if (atomic_dec_and_test(&first->count))
                kfree(first);
index c988e68..3f86026 100644 (file)
--- a/net/core/net_namespace.c
+++ b/net/core/net_namespace.c
@@ -42,7 +42,9 @@ static int net_assign_generic(struct net *net, int id, void *data)
        BUG_ON(!mutex_is_locked(&net_mutex));
        BUG_ON(id == 0);
 
-       ng = old_ng = net->gen;
+       old_ng = rcu_dereference_protected(net->gen,
+                                          lockdep_is_held(&net_mutex));
+       ng = old_ng;
        if (old_ng->len >= id)
                goto assign;
 
index 2c0df0f..679b797 100644 (file)
--- a/net/core/pktgen.c
+++ b/net/core/pktgen.c
@@ -771,10 +771,10 @@ done:
 static unsigned long num_arg(const char __user * user_buffer,
                             unsigned long maxlen, unsigned long *num)
 {
-       int i = 0;
+       int i;
        *num = 0;
 
-       for (; i < maxlen; i++) {
+       for (i = 0; i < maxlen; i++) {
                char c;
                if (get_user(c, &user_buffer[i]))
                        return -EFAULT;
@@ -789,9 +789,9 @@ static unsigned long num_arg(const char __user * user_buffer,
 
 static int strn_len(const char __user * user_buffer, unsigned int maxlen)
 {
-       int i = 0;
+       int i;
 
-       for (; i < maxlen; i++) {
+       for (i = 0; i < maxlen; i++) {
                char c;
                if (get_user(c, &user_buffer[i]))
                        return -EFAULT;
@@ -846,7 +846,7 @@ static ssize_t pktgen_if_write(struct file *file,
 {
        struct seq_file *seq = file->private_data;
        struct pktgen_dev *pkt_dev = seq->private;
-       int i = 0, max, len;
+       int i, max, len;
        char name[16], valstr[32];
        unsigned long value = 0;
        char *pg_result = NULL;
@@ -860,13 +860,13 @@ static ssize_t pktgen_if_write(struct file *file,
                return -EINVAL;
        }
 
-       max = count - i;
-       tmp = count_trail_chars(&user_buffer[i], max);
+       max = count;
+       tmp = count_trail_chars(user_buffer, max);
        if (tmp < 0) {
                pr_warning("illegal format\n");
                return tmp;
        }
-       i += tmp;
+       i = tmp;
 
        /* Read variable name */
 
@@ -1764,7 +1764,7 @@ static ssize_t pktgen_thread_write(struct file *file,
 {
        struct seq_file *seq = file->private_data;
        struct pktgen_thread *t = seq->private;
-       int i = 0, max, len, ret;
+       int i, max, len, ret;
        char name[40];
        char *pg_result;
 
@@ -1773,12 +1773,12 @@ static ssize_t pktgen_thread_write(struct file *file,
                return -EINVAL;
        }
 
-       max = count - i;
-       len = count_trail_chars(&user_buffer[i], max);
+       max = count;
+       len = count_trail_chars(user_buffer, max);
        if (len < 0)
                return len;
 
-       i += len;
+       i = len;
 
        /* Read variable name */
 
@@ -1975,7 +1975,7 @@ static struct net_device *pktgen_dev_get_by_name(struct pktgen_dev *pkt_dev,
                                                 const char *ifname)
 {
        char b[IFNAMSIZ+5];
-       int i = 0;
+       int i;
 
        for (i = 0; ifname[i] != '@'; i++) {
                if (i == IFNAMSIZ)
@@ -2519,8 +2519,8 @@ static void free_SAs(struct pktgen_dev *pkt_dev)
 {
        if (pkt_dev->cflows) {
                /* let go of the SAs if we have them */
-               int i = 0;
-               for ( i < pkt_dev->cflows; i++) {
+               int i;
+               for (i = 0; i < pkt_dev->cflows; i++) {
                        struct xfrm_state *x = pkt_dev->flows[i].x;
                        if (x) {
                                xfrm_state_put(x);
index 11db436..3eed542 100644 (file)
--- a/net/core/sock.c
+++ b/net/core/sock.c
@@ -1225,7 +1225,7 @@ struct sock *sk_clone(const struct sock *sk, const gfp_t priority)
                sock_reset_flag(newsk, SOCK_DONE);
                skb_queue_head_init(&newsk->sk_error_queue);
 
-               filter = newsk->sk_filter;
+               filter = rcu_dereference_protected(newsk->sk_filter, 1);
                if (filter != NULL)
                        sk_filter_charge(newsk, filter);
 
index 01eee5d..385b609 100644 (file)
--- a/net/core/sysctl_net_core.c
+++ b/net/core/sysctl_net_core.c
@@ -34,7 +34,8 @@ static int rps_sock_flow_sysctl(ctl_table *table, int write,
 
        mutex_lock(&sock_flow_mutex);
 
-       orig_sock_table = rps_sock_flow_table;
+       orig_sock_table = rcu_dereference_protected(rps_sock_flow_table,
+                                       lockdep_is_held(&sock_flow_mutex));
        size = orig_size = orig_sock_table ? orig_sock_table->mask + 1 : 0;
 
        ret = proc_dointvec(&tmp, write, buffer, lenp, ppos);
index 43e1c59..b232375 100644 (file)
--- a/net/ipv4/fib_hash.c
+++ b/net/ipv4/fib_hash.c
@@ -120,11 +120,12 @@ static inline void fn_rebuild_zone(struct fn_zone *fz,
                struct fib_node *f;
 
                hlist_for_each_entry_safe(f, node, n, &old_ht[i], fn_hash) {
-                       struct hlist_head __rcu *new_head;
+                       struct hlist_head *new_head;
 
                        hlist_del_rcu(&f->fn_hash);
 
-                       new_head = &fz->fz_hash[fn_hash(f->fn_key, fz)];
+                       new_head = rcu_dereference_protected(fz->fz_hash, 1) +
+                                  fn_hash(f->fn_key, fz);
                        hlist_add_head_rcu(&f->fn_hash, new_head);
                }
        }
@@ -179,8 +180,8 @@ static void fn_rehash_zone(struct fn_zone *fz)
                memcpy(&nfz, fz, sizeof(nfz));
 
                write_seqlock_bh(&fz->fz_lock);
-               old_ht = fz->fz_hash;
-               nfz.fz_hash = ht;
+               old_ht = rcu_dereference_protected(fz->fz_hash, 1);
+               RCU_INIT_POINTER(nfz.fz_hash, ht);
                nfz.fz_hashmask = new_hashmask;
                nfz.fz_divisor = new_divisor;
                fn_rebuild_zone(&nfz, old_ht, old_divisor);
@@ -236,7 +237,7 @@ fn_new_zone(struct fn_hash *table, int z)
        seqlock_init(&fz->fz_lock);
        fz->fz_divisor = z ? EMBEDDED_HASH_SIZE : 1;
        fz->fz_hashmask = fz->fz_divisor - 1;
-       fz->fz_hash = fz->fz_embedded_hash;
+       RCU_INIT_POINTER(fz->fz_hash, fz->fz_embedded_hash);
        fz->fz_order = z;
        fz->fz_revorder = 32 - z;
        fz->fz_mask = inet_make_mask(z);
@@ -272,7 +273,7 @@ int fib_table_lookup(struct fib_table *tb,
        for (fz = rcu_dereference(t->fn_zone_list);
             fz != NULL;
             fz = rcu_dereference(fz->fz_next)) {
-               struct hlist_head __rcu *head;
+               struct hlist_head *head;
                struct hlist_node *node;
                struct fib_node *f;
                __be32 k;
@@ -282,7 +283,7 @@ int fib_table_lookup(struct fib_table *tb,
                        seq = read_seqbegin(&fz->fz_lock);
                        k = fz_key(flp->fl4_dst, fz);
 
-                       head = &fz->fz_hash[fn_hash(k, fz)];
+                       head = rcu_dereference(fz->fz_hash) + fn_hash(k, fz);
                        hlist_for_each_entry_rcu(f, node, head, fn_hash) {
                                if (f->fn_key != k)
                                        continue;
@@ -311,6 +312,7 @@ void fib_table_select_default(struct fib_table *tb,
        struct fib_info *last_resort;
        struct fn_hash *t = (struct fn_hash *)tb->tb_data;
        struct fn_zone *fz = t->fn_zones[0];
+       struct hlist_head *head;
 
        if (fz == NULL)
                return;
@@ -320,7 +322,8 @@ void fib_table_select_default(struct fib_table *tb,
        order = -1;
 
        rcu_read_lock();
-       hlist_for_each_entry_rcu(f, node, &fz->fz_hash[0], fn_hash) {
+       head = rcu_dereference(fz->fz_hash);
+       hlist_for_each_entry_rcu(f, node, head, fn_hash) {
                struct fib_alias *fa;
 
                list_for_each_entry_rcu(fa, &f->fn_alias, fa_list) {
@@ -374,7 +377,7 @@ out:
 /* Insert node F to FZ. */
 static inline void fib_insert_node(struct fn_zone *fz, struct fib_node *f)
 {
-       struct hlist_head *head = &fz->fz_hash[fn_hash(f->fn_key, fz)];
+       struct hlist_head *head = rtnl_dereference(fz->fz_hash) + fn_hash(f->fn_key, fz);
 
        hlist_add_head_rcu(&f->fn_hash, head);
 }
@@ -382,7 +385,7 @@ static inline void fib_insert_node(struct fn_zone *fz, struct fib_node *f)
 /* Return the node in FZ matching KEY. */
 static struct fib_node *fib_find_node(struct fn_zone *fz, __be32 key)
 {
-       struct hlist_head *head = &fz->fz_hash[fn_hash(key, fz)];
+       struct hlist_head *head = rtnl_dereference(fz->fz_hash) + fn_hash(key, fz);
        struct hlist_node *node;
        struct fib_node *f;
 
@@ -662,7 +665,7 @@ int fib_table_delete(struct fib_table *tb, struct fib_config *cfg)
 
 static int fn_flush_list(struct fn_zone *fz, int idx)
 {
-       struct hlist_head *head = &fz->fz_hash[idx];
+       struct hlist_head *head = rtnl_dereference(fz->fz_hash) + idx;
        struct hlist_node *node, *n;
        struct fib_node *f;
        int found = 0;
@@ -761,14 +764,15 @@ fn_hash_dump_zone(struct sk_buff *skb, struct netlink_callback *cb,
                   struct fn_zone *fz)
 {
        int h, s_h;
+       struct hlist_head *head = rcu_dereference(fz->fz_hash);
 
-       if (fz->fz_hash == NULL)
+       if (head == NULL)
                return skb->len;
        s_h = cb->args[3];
        for (h = s_h; h < fz->fz_divisor; h++) {
-               if (hlist_empty(&fz->fz_hash[h]))
+               if (hlist_empty(head + h))
                        continue;
-               if (fn_hash_dump_bucket(skb, cb, tb, fz, &fz->fz_hash[h]) < 0) {
+               if (fn_hash_dump_bucket(skb, cb, tb, fz, head + h) < 0) {
                        cb->args[3] = h;
                        return -1;
                }
@@ -872,7 +876,7 @@ static struct fib_alias *fib_get_first(struct seq_file *seq)
                if (!iter->zone->fz_nent)
                        continue;
 
-               iter->hash_head = iter->zone->fz_hash;
+               iter->hash_head = rcu_dereference(iter->zone->fz_hash);
                maxslot = iter->zone->fz_divisor;
 
                for (iter->bucket = 0; iter->bucket < maxslot;
@@ -957,7 +961,7 @@ static struct fib_alias *fib_get_next(struct seq_file *seq)
                        goto out;
 
                iter->bucket = 0;
-               iter->hash_head = iter->zone->fz_hash;
+               iter->hash_head = rcu_dereference(iter->zone->fz_hash);
 
                hlist_for_each_entry(fn, node, iter->hash_head, fn_hash) {
                        list_for_each_entry(fa, &fn->fn_alias, fa_list) {
index caea688..c6933f2 100644 (file)
--- a/net/ipv4/gre.c
+++ b/net/ipv4/gre.c
@@ -22,7 +22,7 @@
 #include <net/gre.h>
 
 
-static const struct gre_protocol *gre_proto[GREPROTO_MAX] __read_mostly;
+static const struct gre_protocol __rcu *gre_proto[GREPROTO_MAX] __read_mostly;
 static DEFINE_SPINLOCK(gre_proto_lock);
 
 int gre_add_protocol(const struct gre_protocol *proto, u8 version)
@@ -51,7 +51,8 @@ int gre_del_protocol(const struct gre_protocol *proto, u8 version)
                goto err_out;
 
        spin_lock(&gre_proto_lock);
-       if (gre_proto[version] != proto)
+       if (rcu_dereference_protected(gre_proto[version],
+                       lockdep_is_held(&gre_proto_lock)) != proto)
                goto err_out_unlock;
        rcu_assign_pointer(gre_proto[version], NULL);
        spin_unlock(&gre_proto_lock);
index 9ffa24b..9e94d7c 100644 (file)
--- a/net/ipv4/inetpeer.c
+++ b/net/ipv4/inetpeer.c
@@ -72,18 +72,19 @@ static struct kmem_cache *peer_cachep __read_mostly;
 #define node_height(x) x->avl_height
 
 #define peer_avl_empty ((struct inet_peer *)&peer_fake_node)
+#define peer_avl_empty_rcu ((struct inet_peer __rcu __force *)&peer_fake_node)
 static const struct inet_peer peer_fake_node = {
-       .avl_left       = peer_avl_empty,
-       .avl_right      = peer_avl_empty,
+       .avl_left       = peer_avl_empty_rcu,
+       .avl_right      = peer_avl_empty_rcu,
        .avl_height     = 0
 };
 
 static struct {
-       struct inet_peer *root;
+       struct inet_peer __rcu *root;
        spinlock_t      lock;
        int             total;
 } peers = {
-       .root           = peer_avl_empty,
+       .root           = peer_avl_empty_rcu,
        .lock           = __SPIN_LOCK_UNLOCKED(peers.lock),
        .total          = 0,
 };
@@ -156,11 +157,14 @@ static void unlink_from_unused(struct inet_peer *p)
  */
 #define lookup(_daddr, _stack)                                         \
 ({                                                             \
-       struct inet_peer *u, **v;                               \
+       struct inet_peer *u;                                    \
+       struct inet_peer __rcu **v;                             \
                                                                \
        stackptr = _stack;                                      \
        *stackptr++ = &peers.root;                              \
-       for (u = peers.root; u != peer_avl_empty; ) {           \
+       for (u = rcu_dereference_protected(peers.root,          \
+                       lockdep_is_held(&peers.lock));          \
+            u != peer_avl_empty; ) {                           \
                if (_daddr == u->v4daddr)                       \
                        break;                                  \
                if ((__force __u32)_daddr < (__force __u32)u->v4daddr)  \
@@ -168,7 +172,8 @@ static void unlink_from_unused(struct inet_peer *p)
                else                                            \
                        v = &u->avl_right;                      \
                *stackptr++ = v;                                \
-               u = *v;                                         \
+               u = rcu_dereference_protected(*v,               \
+                       lockdep_is_held(&peers.lock));          \
        }                                                       \
        u;                                                      \
 })
@@ -209,13 +214,17 @@ static struct inet_peer *lookup_rcu_bh(__be32 daddr)
 /* Called with local BH disabled and the pool lock held. */
 #define lookup_rightempty(start)                               \
 ({                                                             \
-       struct inet_peer *u, **v;                               \
+       struct inet_peer *u;                                    \
+       struct inet_peer __rcu **v;                             \
        *stackptr++ = &start->avl_left;                         \
        v = &start->avl_left;                                   \
-       for (u = *v; u->avl_right != peer_avl_empty; ) {        \
+       for (u = rcu_dereference_protected(*v,                  \
+                       lockdep_is_held(&peers.lock));          \
+            u->avl_right != peer_avl_empty_rcu; ) {            \
                v = &u->avl_right;                              \
                *stackptr++ = v;                                \
-               u = *v;                                         \
+               u = rcu_dereference_protected(*v,               \
+                       lockdep_is_held(&peers.lock));          \
        }                                                       \
        u;                                                      \
 })
@@ -224,74 +233,86 @@ static struct inet_peer *lookup_rcu_bh(__be32 daddr)
  * Variable names are the proof of operation correctness.
  * Look into mm/map_avl.c for more detail description of the ideas.
  */
-static void peer_avl_rebalance(struct inet_peer **stack[],
-               struct inet_peer ***stackend)
+static void peer_avl_rebalance(struct inet_peer __rcu **stack[],
+               struct inet_peer __rcu ***stackend)
 {
-       struct inet_peer **nodep, *node, *l, *r;
+       struct inet_peer __rcu **nodep;
+       struct inet_peer *node, *l, *r;
        int lh, rh;
 
        while (stackend > stack) {
                nodep = *--stackend;
-               node = *nodep;
-               l = node->avl_left;
-               r = node->avl_right;
+               node = rcu_dereference_protected(*nodep,
+                               lockdep_is_held(&peers.lock));
+               l = rcu_dereference_protected(node->avl_left,
+                               lockdep_is_held(&peers.lock));
+               r = rcu_dereference_protected(node->avl_right,
+                               lockdep_is_held(&peers.lock));
                lh = node_height(l);
                rh = node_height(r);
                if (lh > rh + 1) { /* l: RH+2 */
                        struct inet_peer *ll, *lr, *lrl, *lrr;
                        int lrh;
-                       ll = l->avl_left;
-                       lr = l->avl_right;
+                       ll = rcu_dereference_protected(l->avl_left,
+                               lockdep_is_held(&peers.lock));
+                       lr = rcu_dereference_protected(l->avl_right,
+                               lockdep_is_held(&peers.lock));
                        lrh = node_height(lr);
                        if (lrh <= node_height(ll)) {   /* ll: RH+1 */
-                               node->avl_left = lr;    /* lr: RH or RH+1 */
-                               node->avl_right = r;    /* r: RH */
+                               RCU_INIT_POINTER(node->avl_left, lr);   /* lr: RH or RH+1 */
+                               RCU_INIT_POINTER(node->avl_right, r);   /* r: RH */
                                node->avl_height = lrh + 1; /* RH+1 or RH+2 */
-                               l->avl_left = ll;       /* ll: RH+1 */
-                               l->avl_right = node;    /* node: RH+1 or RH+2 */
+                               RCU_INIT_POINTER(l->avl_left, ll);       /* ll: RH+1 */
+                               RCU_INIT_POINTER(l->avl_right, node);   /* node: RH+1 or RH+2 */
                                l->avl_height = node->avl_height + 1;
-                               *nodep = l;
+                               RCU_INIT_POINTER(*nodep, l);
                        } else { /* ll: RH, lr: RH+1 */
-                               lrl = lr->avl_left;     /* lrl: RH or RH-1 */
-                               lrr = lr->avl_right;    /* lrr: RH or RH-1 */
-                               node->avl_left = lrr;   /* lrr: RH or RH-1 */
-                               node->avl_right = r;    /* r: RH */
+                               lrl = rcu_dereference_protected(lr->avl_left,
+                                       lockdep_is_held(&peers.lock));  /* lrl: RH or RH-1 */
+                               lrr = rcu_dereference_protected(lr->avl_right,
+                                       lockdep_is_held(&peers.lock));  /* lrr: RH or RH-1 */
+                               RCU_INIT_POINTER(node->avl_left, lrr);  /* lrr: RH or RH-1 */
+                               RCU_INIT_POINTER(node->avl_right, r);   /* r: RH */
                                node->avl_height = rh + 1; /* node: RH+1 */
-                               l->avl_left = ll;       /* ll: RH */
-                               l->avl_right = lrl;     /* lrl: RH or RH-1 */
+                               RCU_INIT_POINTER(l->avl_left, ll);      /* ll: RH */
+                               RCU_INIT_POINTER(l->avl_right, lrl);    /* lrl: RH or RH-1 */
                                l->avl_height = rh + 1; /* l: RH+1 */
-                               lr->avl_left = l;       /* l: RH+1 */
-                               lr->avl_right = node;   /* node: RH+1 */
+                               RCU_INIT_POINTER(lr->avl_left, l);      /* l: RH+1 */
+                               RCU_INIT_POINTER(lr->avl_right, node);  /* node: RH+1 */
                                lr->avl_height = rh + 2;
-                               *nodep = lr;
+                               RCU_INIT_POINTER(*nodep, lr);
                        }
                } else if (rh > lh + 1) { /* r: LH+2 */
                        struct inet_peer *rr, *rl, *rlr, *rll;
                        int rlh;
-                       rr = r->avl_right;
-                       rl = r->avl_left;
+                       rr = rcu_dereference_protected(r->avl_right,
+                               lockdep_is_held(&peers.lock));
+                       rl = rcu_dereference_protected(r->avl_left,
+                               lockdep_is_held(&peers.lock));
                        rlh = node_height(rl);
                        if (rlh <= node_height(rr)) {   /* rr: LH+1 */
-                               node->avl_right = rl;   /* rl: LH or LH+1 */
-                               node->avl_left = l;     /* l: LH */
+                               RCU_INIT_POINTER(node->avl_right, rl);  /* rl: LH or LH+1 */
+                               RCU_INIT_POINTER(node->avl_left, l);    /* l: LH */
                                node->avl_height = rlh + 1; /* LH+1 or LH+2 */
-                               r->avl_right = rr;      /* rr: LH+1 */
-                               r->avl_left = node;     /* node: LH+1 or LH+2 */
+                               RCU_INIT_POINTER(r->avl_right, rr);     /* rr: LH+1 */
+                               RCU_INIT_POINTER(r->avl_left, node);    /* node: LH+1 or LH+2 */
                                r->avl_height = node->avl_height + 1;
-                               *nodep = r;
+                               RCU_INIT_POINTER(*nodep, r);
                        } else { /* rr: RH, rl: RH+1 */
-                               rlr = rl->avl_right;    /* rlr: LH or LH-1 */
-                               rll = rl->avl_left;     /* rll: LH or LH-1 */
-                               node->avl_right = rll;  /* rll: LH or LH-1 */
-                               node->avl_left = l;     /* l: LH */
+                               rlr = rcu_dereference_protected(rl->avl_right,
+                                       lockdep_is_held(&peers.lock));  /* rlr: LH or LH-1 */
+                               rll = rcu_dereference_protected(rl->avl_left,
+                                       lockdep_is_held(&peers.lock));  /* rll: LH or LH-1 */
+                               RCU_INIT_POINTER(node->avl_right, rll); /* rll: LH or LH-1 */
+                               RCU_INIT_POINTER(node->avl_left, l);    /* l: LH */
                                node->avl_height = lh + 1; /* node: LH+1 */
-                               r->avl_right = rr;      /* rr: LH */
-                               r->avl_left = rlr;      /* rlr: LH or LH-1 */
+                               RCU_INIT_POINTER(r->avl_right, rr);     /* rr: LH */
+                               RCU_INIT_POINTER(r->avl_left, rlr);     /* rlr: LH or LH-1 */
                                r->avl_height = lh + 1; /* r: LH+1 */
-                               rl->avl_right = r;      /* r: LH+1 */
-                               rl->avl_left = node;    /* node: LH+1 */
+                               RCU_INIT_POINTER(rl->avl_right, r);     /* r: LH+1 */
+                               RCU_INIT_POINTER(rl->avl_left, node);   /* node: LH+1 */
                                rl->avl_height = lh + 2;
-                               *nodep = rl;
+                               RCU_INIT_POINTER(*nodep, rl);
                        }
                } else {
                        node->avl_height = (lh > rh ? lh : rh) + 1;
@@ -303,10 +324,10 @@ static void peer_avl_rebalance(struct inet_peer **stack[],
 #define link_to_pool(n)                                                \
 do {                                                           \
        n->avl_height = 1;                                      \
-       n->avl_left = peer_avl_empty;                           \
-       n->avl_right = peer_avl_empty;                          \
-       smp_wmb(); /* lockless readers can catch us now */      \
-       **--stackptr = n;                                       \
+       n->avl_left = peer_avl_empty_rcu;                       \
+       n->avl_right = peer_avl_empty_rcu;                      \
+       /* lockless readers can catch us now */                 \
+       rcu_assign_pointer(**--stackptr, n);                    \
        peer_avl_rebalance(stack, stackptr);                    \
 } while (0)
 
@@ -330,24 +351,25 @@ static void unlink_from_pool(struct inet_peer *p)
         * We use refcnt=-1 to alert lockless readers this entry is deleted.
         */
        if (atomic_cmpxchg(&p->refcnt, 1, -1) == 1) {
-               struct inet_peer **stack[PEER_MAXDEPTH];
-               struct inet_peer ***stackptr, ***delp;
+               struct inet_peer __rcu **stack[PEER_MAXDEPTH];
+               struct inet_peer __rcu ***stackptr, ***delp;
                if (lookup(p->v4daddr, stack) != p)
                        BUG();
                delp = stackptr - 1; /* *delp[0] == p */
-               if (p->avl_left == peer_avl_empty) {
+               if (p->avl_left == peer_avl_empty_rcu) {
                        *delp[0] = p->avl_right;
                        --stackptr;
                } else {
                        /* look for a node to insert instead of p */
                        struct inet_peer *t;
                        t = lookup_rightempty(p);
-                       BUG_ON(*stackptr[-1] != t);
+                       BUG_ON(rcu_dereference_protected(*stackptr[-1],
+                                       lockdep_is_held(&peers.lock)) != t);
                        **--stackptr = t->avl_left;
                        /* t is removed, t->v4daddr > x->v4daddr for any
                         * x in p->avl_left subtree.
                         * Put t in the old place of p. */
-                       *delp[0] = t;
+                       RCU_INIT_POINTER(*delp[0], t);
                        t->avl_left = p->avl_left;
                        t->avl_right = p->avl_right;
                        t->avl_height = p->avl_height;
@@ -414,7 +436,7 @@ static int cleanup_once(unsigned long ttl)
 struct inet_peer *inet_getpeer(__be32 daddr, int create)
 {
        struct inet_peer *p;
-       struct inet_peer **stack[PEER_MAXDEPTH], ***stackptr;
+       struct inet_peer __rcu **stack[PEER_MAXDEPTH], ***stackptr;
 
        /* Look up for the address quickly, lockless.
         * Because of a concurrent writer, we might not find an existing entry.
index d0ffcbe..01087e0 100644 (file)
--- a/net/ipv4/ip_gre.c
+++ b/net/ipv4/ip_gre.c
@@ -1072,6 +1072,7 @@ ipgre_tunnel_ioctl (struct net_device *dev, struct ifreq *ifr, int cmd)
                                        break;
                                }
                                ipgre_tunnel_unlink(ign, t);
+                               synchronize_net();
                                t->parms.iph.saddr = p.iph.saddr;
                                t->parms.iph.daddr = p.iph.daddr;
                                t->parms.i_key = p.i_key;
index 64b70ad..3948c86 100644 (file)
--- a/net/ipv4/ip_sockglue.c
+++ b/net/ipv4/ip_sockglue.c
@@ -238,7 +238,7 @@ int ip_cmsg_send(struct net *net, struct msghdr *msg, struct ipcm_cookie *ipc)
    but receiver should be enough clever f.e. to forward mtrace requests,
    sent to multicast group to reach destination designated router.
  */
-struct ip_ra_chain *ip_ra_chain;
+struct ip_ra_chain __rcu *ip_ra_chain;
 static DEFINE_SPINLOCK(ip_ra_lock);
 
 
@@ -253,7 +253,8 @@ static void ip_ra_destroy_rcu(struct rcu_head *head)
 int ip_ra_control(struct sock *sk, unsigned char on,
                  void (*destructor)(struct sock *))
 {
-       struct ip_ra_chain *ra, *new_ra, **rap;
+       struct ip_ra_chain *ra, *new_ra;
+       struct ip_ra_chain __rcu **rap;
 
        if (sk->sk_type != SOCK_RAW || inet_sk(sk)->inet_num == IPPROTO_RAW)
                return -EINVAL;
@@ -261,7 +262,10 @@ int ip_ra_control(struct sock *sk, unsigned char on,
        new_ra = on ? kmalloc(sizeof(*new_ra), GFP_KERNEL) : NULL;
 
        spin_lock_bh(&ip_ra_lock);
-       for (rap = &ip_ra_chain; (ra = *rap) != NULL; rap = &ra->next) {
+       for (rap = &ip_ra_chain;
+            (ra = rcu_dereference_protected(*rap,
+                       lockdep_is_held(&ip_ra_lock))) != NULL;
+            rap = &ra->next) {
                if (ra->sk == sk) {
                        if (on) {
                                spin_unlock_bh(&ip_ra_lock);
index e9b816e..cd300aa 100644 (file)
--- a/net/ipv4/ipip.c
+++ b/net/ipv4/ipip.c
@@ -676,6 +676,7 @@ ipip_tunnel_ioctl (struct net_device *dev, struct ifreq *ifr, int cmd)
                                }
                                t = netdev_priv(dev);
                                ipip_tunnel_unlink(ipn, t);
+                               synchronize_net();
                                t->parms.iph.saddr = p.iph.saddr;
                                t->parms.iph.daddr = p.iph.daddr;
                                memcpy(dev->dev_addr, &p.iph.saddr, 4);
index 65699c2..9ae5c01 100644 (file)
--- a/net/ipv4/protocol.c
+++ b/net/ipv4/protocol.c
@@ -28,7 +28,7 @@
 #include <linux/spinlock.h>
 #include <net/protocol.h>
 
-const struct net_protocol *inet_protos[MAX_INET_PROTOS] __read_mostly;
+const struct net_protocol __rcu *inet_protos[MAX_INET_PROTOS] __read_mostly;
 
 /*
  *     Add a protocol handler to the hash tables
@@ -38,7 +38,8 @@ int inet_add_protocol(const struct net_protocol *prot, unsigned char protocol)
 {
        int hash = protocol & (MAX_INET_PROTOS - 1);
 
-       return !cmpxchg(&inet_protos[hash], NULL, prot) ? 0 : -1;
+       return !cmpxchg((const struct net_protocol **)&inet_protos[hash],
+                       NULL, prot) ? 0 : -1;
 }
 EXPORT_SYMBOL(inet_add_protocol);
 
@@ -50,7 +51,8 @@ int inet_del_protocol(const struct net_protocol *prot, unsigned char protocol)
 {
        int ret, hash = protocol & (MAX_INET_PROTOS - 1);
 
-       ret = (cmpxchg(&inet_protos[hash], prot, NULL) == prot) ? 0 : -1;
+       ret = (cmpxchg((const struct net_protocol **)&inet_protos[hash],
+                      prot, NULL) == prot) ? 0 : -1;
 
        synchronize_net();
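
Because inet_protos[] is now an array of __rcu pointers, the cmpxchg() calls need the cast shown above; the compare-and-swap claims the slot atomically and, on success, also acts as the publish barrier, so no separate rcu_assign_pointer() is needed on the add path. A sketch of the same register/unregister/dispatch idiom for a hypothetical handler table:

    #include <linux/atomic.h>       /* cmpxchg() */
    #include <linux/cache.h>        /* __read_mostly */
    #include <linux/netdevice.h>    /* synchronize_net() */
    #include <linux/rcupdate.h>

    #define MAX_HANDLERS 256

    struct my_handler {
        void (*input)(void *pkt);
    };

    static const struct my_handler __rcu *my_handlers[MAX_HANDLERS] __read_mostly;

    /* Register: claim the slot only if it is still empty. */
    static int my_handler_add(const struct my_handler *h, unsigned char num)
    {
        return !cmpxchg((const struct my_handler **)&my_handlers[num],
                        NULL, h) ? 0 : -1;
    }

    /* Unregister, then wait for readers that may still be calling into h. */
    static int my_handler_del(const struct my_handler *h, unsigned char num)
    {
        int ret = (cmpxchg((const struct my_handler **)&my_handlers[num],
                           h, NULL) == h) ? 0 : -1;

        synchronize_net();
        return ret;
    }

    /* Reader/dispatch path. */
    static void my_handler_run(unsigned char num, void *pkt)
    {
        const struct my_handler *h;

        rcu_read_lock();
        h = rcu_dereference(my_handlers[num]);
        if (h)
            h->input(pkt);
        rcu_read_unlock();
    }
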
 
index d6cb2bf..987bf9a 100644 (file)
@@ -198,7 +198,7 @@ const __u8 ip_tos2prio[16] = {
  */
 
 struct rt_hash_bucket {
-       struct rtable   *chain;
+       struct rtable __rcu     *chain;
 };
 
 #if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK) || \
@@ -280,7 +280,7 @@ static struct rtable *rt_cache_get_first(struct seq_file *seq)
        struct rtable *r = NULL;
 
        for (st->bucket = rt_hash_mask; st->bucket >= 0; --st->bucket) {
-               if (!rt_hash_table[st->bucket].chain)
+               if (!rcu_dereference_raw(rt_hash_table[st->bucket].chain))
                        continue;
                rcu_read_lock_bh();
                r = rcu_dereference_bh(rt_hash_table[st->bucket].chain);
@@ -300,17 +300,17 @@ static struct rtable *__rt_cache_get_next(struct seq_file *seq,
 {
        struct rt_cache_iter_state *st = seq->private;
 
-       r = r->dst.rt_next;
+       r = rcu_dereference_bh(r->dst.rt_next);
        while (!r) {
                rcu_read_unlock_bh();
                do {
                        if (--st->bucket < 0)
                                return NULL;
-               } while (!rt_hash_table[st->bucket].chain);
+               } while (!rcu_dereference_raw(rt_hash_table[st->bucket].chain));
                rcu_read_lock_bh();
-               r = rt_hash_table[st->bucket].chain;
+               r = rcu_dereference_bh(rt_hash_table[st->bucket].chain);
        }
-       return rcu_dereference_bh(r);
+       return r;
 }
 
 static struct rtable *rt_cache_get_next(struct seq_file *seq,
@@ -721,19 +721,23 @@ static void rt_do_flush(int process_context)
        for (i = 0; i <= rt_hash_mask; i++) {
                if (process_context && need_resched())
                        cond_resched();
-               rth = rt_hash_table[i].chain;
+               rth = rcu_dereference_raw(rt_hash_table[i].chain);
                if (!rth)
                        continue;
 
                spin_lock_bh(rt_hash_lock_addr(i));
 #ifdef CONFIG_NET_NS
                {
-               struct rtable ** prev, * p;
+               struct rtable __rcu **prev;
+               struct rtable *p;
 
-               rth = rt_hash_table[i].chain;
+               rth = rcu_dereference_protected(rt_hash_table[i].chain,
+                       lockdep_is_held(rt_hash_lock_addr(i)));
 
                /* defer releasing the head of the list after spin_unlock */
-               for (tail = rth; tail; tail = tail->dst.rt_next)
+               for (tail = rth; tail;
+                    tail = rcu_dereference_protected(tail->dst.rt_next,
+                               lockdep_is_held(rt_hash_lock_addr(i))))
                        if (!rt_is_expired(tail))
                                break;
                if (rth != tail)
@@ -741,8 +745,12 @@ static void rt_do_flush(int process_context)
 
                /* call rt_free on entries after the tail requiring flush */
                prev = &rt_hash_table[i].chain;
-               for (p = *prev; p; p = next) {
-                       next = p->dst.rt_next;
+               for (p = rcu_dereference_protected(*prev,
+                               lockdep_is_held(rt_hash_lock_addr(i)));
+                    p != NULL;
+                    p = next) {
+                       next = rcu_dereference_protected(p->dst.rt_next,
+                               lockdep_is_held(rt_hash_lock_addr(i)));
                        if (!rt_is_expired(p)) {
                                prev = &p->dst.rt_next;
                        } else {
@@ -752,14 +760,15 @@ static void rt_do_flush(int process_context)
                }
                }
 #else
-               rth = rt_hash_table[i].chain;
-               rt_hash_table[i].chain = NULL;
+               rth = rcu_dereference_protected(rt_hash_table[i].chain,
+                       lockdep_is_held(rt_hash_lock_addr(i)));
+               rcu_assign_pointer(rt_hash_table[i].chain, NULL);
                tail = NULL;
 #endif
                spin_unlock_bh(rt_hash_lock_addr(i));
 
                for (; rth != tail; rth = next) {
-                       next = rth->dst.rt_next;
+                       next = rcu_dereference_protected(rth->dst.rt_next, 1);
                        rt_free(rth);
                }
        }
@@ -790,7 +799,7 @@ static int has_noalias(const struct rtable *head, const struct rtable *rth)
        while (aux != rth) {
                if (compare_hash_inputs(&aux->fl, &rth->fl))
                        return 0;
-               aux = aux->dst.rt_next;
+               aux = rcu_dereference_protected(aux->dst.rt_next, 1);
        }
        return ONE;
 }
@@ -799,7 +808,8 @@ static void rt_check_expire(void)
 {
        static unsigned int rover;
        unsigned int i = rover, goal;
-       struct rtable *rth, **rthp;
+       struct rtable *rth;
+       struct rtable __rcu **rthp;
        unsigned long samples = 0;
        unsigned long sum = 0, sum2 = 0;
        unsigned long delta;
@@ -825,11 +835,12 @@ static void rt_check_expire(void)
 
                samples++;
 
-               if (*rthp == NULL)
+               if (rcu_dereference_raw(*rthp) == NULL)
                        continue;
                length = 0;
                spin_lock_bh(rt_hash_lock_addr(i));
-               while ((rth = *rthp) != NULL) {
+               while ((rth = rcu_dereference_protected(*rthp,
+                                       lockdep_is_held(rt_hash_lock_addr(i)))) != NULL) {
                        prefetch(rth->dst.rt_next);
                        if (rt_is_expired(rth)) {
                                *rthp = rth->dst.rt_next;
@@ -941,7 +952,8 @@ static int rt_garbage_collect(struct dst_ops *ops)
        static unsigned long last_gc;
        static int rover;
        static int equilibrium;
-       struct rtable *rth, **rthp;
+       struct rtable *rth;
+       struct rtable __rcu **rthp;
        unsigned long now = jiffies;
        int goal;
        int entries = dst_entries_get_fast(&ipv4_dst_ops);
@@ -995,7 +1007,8 @@ static int rt_garbage_collect(struct dst_ops *ops)
                        k = (k + 1) & rt_hash_mask;
                        rthp = &rt_hash_table[k].chain;
                        spin_lock_bh(rt_hash_lock_addr(k));
-                       while ((rth = *rthp) != NULL) {
+                       while ((rth = rcu_dereference_protected(*rthp,
+                                       lockdep_is_held(rt_hash_lock_addr(k)))) != NULL) {
                                if (!rt_is_expired(rth) &&
                                        !rt_may_expire(rth, tmo, expire)) {
                                        tmo >>= 1;
@@ -1071,7 +1084,7 @@ static int slow_chain_length(const struct rtable *head)
 
        while (rth) {
                length += has_noalias(head, rth);
-               rth = rth->dst.rt_next;
+               rth = rcu_dereference_protected(rth->dst.rt_next, 1);
        }
        return length >> FRACT_BITS;
 }
@@ -1079,9 +1092,9 @@ static int slow_chain_length(const struct rtable *head)
 static int rt_intern_hash(unsigned hash, struct rtable *rt,
                          struct rtable **rp, struct sk_buff *skb, int ifindex)
 {
-       struct rtable   *rth, **rthp;
+       struct rtable   *rth, *cand;
+       struct rtable __rcu **rthp, **candp;
        unsigned long   now;
-       struct rtable *cand, **candp;
        u32             min_score;
        int             chain_length;
        int attempts = !in_softirq();
@@ -1128,7 +1141,8 @@ restart:
        rthp = &rt_hash_table[hash].chain;
 
        spin_lock_bh(rt_hash_lock_addr(hash));
-       while ((rth = *rthp) != NULL) {
+       while ((rth = rcu_dereference_protected(*rthp,
+                       lockdep_is_held(rt_hash_lock_addr(hash)))) != NULL) {
                if (rt_is_expired(rth)) {
                        *rthp = rth->dst.rt_next;
                        rt_free(rth);
@@ -1324,12 +1338,14 @@ EXPORT_SYMBOL(__ip_select_ident);
 
 static void rt_del(unsigned hash, struct rtable *rt)
 {
-       struct rtable **rthp, *aux;
+       struct rtable __rcu **rthp;
+       struct rtable *aux;
 
        rthp = &rt_hash_table[hash].chain;
        spin_lock_bh(rt_hash_lock_addr(hash));
        ip_rt_put(rt);
-       while ((aux = *rthp) != NULL) {
+       while ((aux = rcu_dereference_protected(*rthp,
+                       lockdep_is_held(rt_hash_lock_addr(hash)))) != NULL) {
                if (aux == rt || rt_is_expired(aux)) {
                        *rthp = aux->dst.rt_next;
                        rt_free(aux);
@@ -1346,7 +1362,8 @@ void ip_rt_redirect(__be32 old_gw, __be32 daddr, __be32 new_gw,
 {
        int i, k;
        struct in_device *in_dev = __in_dev_get_rcu(dev);
-       struct rtable *rth, **rthp;
+       struct rtable *rth;
+       struct rtable __rcu **rthp;
        __be32  skeys[2] = { saddr, 0 };
        int  ikeys[2] = { dev->ifindex, 0 };
        struct netevent_redirect netevent;
@@ -1379,7 +1396,7 @@ void ip_rt_redirect(__be32 old_gw, __be32 daddr, __be32 new_gw,
                        unsigned hash = rt_hash(daddr, skeys[i], ikeys[k],
                                                rt_genid(net));
 
-                       rthp=&rt_hash_table[hash].chain;
+                       rthp = &rt_hash_table[hash].chain;
 
                        while ((rth = rcu_dereference(*rthp)) != NULL) {
                                struct rtable *rt;
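
The route.c conversion shows the three access flavours side by side: rcu_dereference_bh() on reader paths inside rcu_read_lock_bh(), rcu_dereference_protected(..., lockdep_is_held(rt_hash_lock_addr(...))) when the per-bucket spinlock is held, and rcu_dereference_raw() (or a constant-true condition) for racy emptiness checks whose result is only a hint. A reader-side sketch over a hypothetical bucketed chain, complementing the writer-side walk shown earlier:

    #include <linux/rcupdate.h>
    #include <linux/types.h>

    struct node {
        struct node __rcu *next;
        unsigned int key;
        unsigned int val;
    };

    struct bucket {
        struct node __rcu *chain;
    };

    #define NBUCKETS 1024
    static struct bucket table[NBUCKETS];

    /* Reader in BH context: copy the value out while still inside the
     * read-side critical section (no reference counting in this sketch). */
    static bool table_lookup(unsigned int key, unsigned int *val)
    {
        struct node *n;
        bool found = false;

        rcu_read_lock_bh();
        for (n = rcu_dereference_bh(table[key % NBUCKETS].chain);
             n != NULL;
             n = rcu_dereference_bh(n->next)) {
            if (n->key == key) {
                *val = n->val;
                found = true;
                break;
            }
        }
        rcu_read_unlock_bh();
        return found;
    }
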
index 9a17bd2..ac3b3ee 100644 (file)
 #include <net/protocol.h>
 #include <net/xfrm.h>
 
-static struct xfrm_tunnel *tunnel4_handlers __read_mostly;
-static struct xfrm_tunnel *tunnel64_handlers __read_mostly;
+static struct xfrm_tunnel __rcu *tunnel4_handlers __read_mostly;
+static struct xfrm_tunnel __rcu *tunnel64_handlers __read_mostly;
 static DEFINE_MUTEX(tunnel4_mutex);
 
-static inline struct xfrm_tunnel **fam_handlers(unsigned short family)
+static inline struct xfrm_tunnel __rcu **fam_handlers(unsigned short family)
 {
        return (family == AF_INET) ? &tunnel4_handlers : &tunnel64_handlers;
 }
 
 int xfrm4_tunnel_register(struct xfrm_tunnel *handler, unsigned short family)
 {
-       struct xfrm_tunnel **pprev;
+       struct xfrm_tunnel __rcu **pprev;
+       struct xfrm_tunnel *t;
+
        int ret = -EEXIST;
        int priority = handler->priority;
 
        mutex_lock(&tunnel4_mutex);
 
-       for (pprev = fam_handlers(family); *pprev; pprev = &(*pprev)->next) {
-               if ((*pprev)->priority > priority)
+       for (pprev = fam_handlers(family);
+            (t = rcu_dereference_protected(*pprev,
+                       lockdep_is_held(&tunnel4_mutex))) != NULL;
+            pprev = &t->next) {
+               if (t->priority > priority)
                        break;
-               if ((*pprev)->priority == priority)
+               if (t->priority == priority)
                        goto err;
        }
 
@@ -52,13 +57,17 @@ EXPORT_SYMBOL(xfrm4_tunnel_register);
 
 int xfrm4_tunnel_deregister(struct xfrm_tunnel *handler, unsigned short family)
 {
-       struct xfrm_tunnel **pprev;
+       struct xfrm_tunnel __rcu **pprev;
+       struct xfrm_tunnel *t;
        int ret = -ENOENT;
 
        mutex_lock(&tunnel4_mutex);
 
-       for (pprev = fam_handlers(family); *pprev; pprev = &(*pprev)->next) {
-               if (*pprev == handler) {
+       for (pprev = fam_handlers(family);
+            (t = rcu_dereference_protected(*pprev,
+                       lockdep_is_held(&tunnel4_mutex))) != NULL;
+            pprev = &t->next) {
+               if (t == handler) {
                        *pprev = handler->next;
                        ret = 0;
                        break;
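
The tunnel4 hunks walk a priority-ordered handler list through a `__rcu **pprev` cursor while tunnel4_mutex is held; the cursor keeps the __rcu qualifier in its type while the temporaries returned by rcu_dereference_protected() drop it. A sketch of priority-ordered insertion under a mutex with hypothetical names (the tunnel6 hunks further down follow the same shape):

    #include <linux/errno.h>
    #include <linux/mutex.h>
    #include <linux/rcupdate.h>

    struct hdlr {
        struct hdlr __rcu *next;
        int priority;
    };

    static struct hdlr __rcu *hdlr_list;
    static DEFINE_MUTEX(hdlr_mutex);

    static int hdlr_register(struct hdlr *h)
    {
        struct hdlr __rcu **pprev;
        struct hdlr *t;
        int ret = -EEXIST;

        mutex_lock(&hdlr_mutex);
        for (pprev = &hdlr_list;
             (t = rcu_dereference_protected(*pprev,
                        lockdep_is_held(&hdlr_mutex))) != NULL;
             pprev = &t->next) {
            if (t->priority > h->priority)
                break;              /* insert before the first higher priority */
            if (t->priority == h->priority)
                goto out;           /* duplicate priority: reject */
        }

        rcu_assign_pointer(h->next, t); /* t is the successor, possibly NULL */
        rcu_assign_pointer(*pprev, h);  /* publish only after h->next is set */
        ret = 0;
    out:
        mutex_unlock(&hdlr_mutex);
        return ret;
    }
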
index b3f7e8c..28cb2d7 100644 (file)
@@ -1413,7 +1413,7 @@ int udp_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
                }
        }
 
-       if (sk->sk_filter) {
+       if (rcu_dereference_raw(sk->sk_filter)) {
                if (udp_lib_checksum_complete(skb))
                        goto drop;
        }
index ec7a91d..e048ec6 100644 (file)
@@ -836,7 +836,7 @@ static int ipv6_create_tempaddr(struct inet6_ifaddr *ifp, struct inet6_ifaddr *i
 {
        struct inet6_dev *idev = ifp->idev;
        struct in6_addr addr, *tmpaddr;
-       unsigned long tmp_prefered_lft, tmp_valid_lft, tmp_cstamp, tmp_tstamp;
+       unsigned long tmp_prefered_lft, tmp_valid_lft, tmp_cstamp, tmp_tstamp, age;
        unsigned long regen_advance;
        int tmp_plen;
        int ret = 0;
@@ -886,12 +886,13 @@ retry:
                goto out;
        }
        memcpy(&addr.s6_addr[8], idev->rndid, 8);
+       age = (jiffies - ifp->tstamp) / HZ;
        tmp_valid_lft = min_t(__u32,
                              ifp->valid_lft,
-                             idev->cnf.temp_valid_lft);
+                             idev->cnf.temp_valid_lft + age);
        tmp_prefered_lft = min_t(__u32,
                                 ifp->prefered_lft,
-                                idev->cnf.temp_prefered_lft -
+                                idev->cnf.temp_prefered_lft + age -
                                 idev->cnf.max_desync_factor);
        tmp_plen = ifp->prefix_len;
        max_addresses = idev->cnf.max_addresses;
@@ -1426,8 +1427,10 @@ void addrconf_dad_failure(struct inet6_ifaddr *ifp)
 {
        struct inet6_dev *idev = ifp->idev;
 
-       if (addrconf_dad_end(ifp))
+       if (addrconf_dad_end(ifp)) {
+               in6_ifa_put(ifp);
                return;
+       }
 
        if (net_ratelimit())
                printk(KERN_INFO "%s: IPv6 duplicate address %pI6c detected!\n",
@@ -2021,10 +2024,11 @@ ok:
                                        ipv6_ifa_notify(0, ift);
                        }
 
-                       if (create && in6_dev->cnf.use_tempaddr > 0) {
+                       if ((create || list_empty(&in6_dev->tempaddr_list)) && in6_dev->cnf.use_tempaddr > 0) {
                                /*
                                 * When a new public address is created as described in [ADDRCONF],
-                                * also create a new temporary address.
+                                * also create a new temporary address. Also create a temporary
+                                * address if it's enabled but no temporary address currently exists.
                                 */
                                read_unlock_bh(&in6_dev->lock);
                                ipv6_create_tempaddr(ifp, NULL);
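
The ipv6_create_tempaddr() change adds the public address's current age back into the sysctl limits before clamping, so both operands of the min_t() are measured from the same starting point (the public address's lifetimes count from ifp->tstamp, which is `age` seconds in the past). A small stand-alone worked example of just that arithmetic, with made-up lifetimes; it models the two min_t() calls only, not the kernel code path:

    #include <stdio.h>

    static unsigned long min_ul(unsigned long a, unsigned long b)
    {
        return a < b ? a : b;
    }

    int main(void)
    {
        /* Made-up numbers, all in seconds. */
        unsigned long age = 3600;               /* public address created 1 h ago */
        unsigned long pub_valid = 86400;        /* ifp->valid_lft                 */
        unsigned long pub_prefered = 14400;     /* ifp->prefered_lft              */
        unsigned long sys_temp_valid = 604800;  /* cnf.temp_valid_lft             */
        unsigned long sys_temp_pref = 86400;    /* cnf.temp_prefered_lft          */
        unsigned long max_desync = 600;         /* cnf.max_desync_factor          */

        unsigned long tmp_valid = min_ul(pub_valid, sys_temp_valid + age);
        unsigned long tmp_pref  = min_ul(pub_prefered,
                                         sys_temp_pref + age - max_desync);

        printf("tmp_valid_lft=%lu tmp_prefered_lft=%lu\n", tmp_valid, tmp_pref);
        return 0;
    }
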
index c2c0f89..2a59610 100644 (file)
@@ -1284,6 +1284,7 @@ ip6_tnl_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
                                t = netdev_priv(dev);
 
                        ip6_tnl_unlink(ip6n, t);
+                       synchronize_net();
                        err = ip6_tnl_change(t, &p);
                        ip6_tnl_link(ip6n, t);
                        netdev_state_change(dev);
@@ -1371,6 +1372,7 @@ static void ip6_tnl_dev_setup(struct net_device *dev)
        dev->flags |= IFF_NOARP;
        dev->addr_len = sizeof(struct in6_addr);
        dev->features |= NETIF_F_NETNS_LOCAL;
+       dev->priv_flags &= ~IFF_XMIT_DST_RELEASE;
 }
 
 
index 0553867..d1770e0 100644 (file)
@@ -343,6 +343,10 @@ static int do_ipv6_setsockopt(struct sock *sk, int level, int optname,
                break;
 
        case IPV6_TRANSPARENT:
+               if (!capable(CAP_NET_ADMIN)) {
+                       retv = -EPERM;
+                       break;
+               }
                if (optlen < sizeof(int))
                        goto e_inval;
                /* we don't have a separate transparent bit for IPV6 we use the one in the IPv4 socket */
index 44d2eea..4484648 100644 (file)
@@ -5,10 +5,15 @@
 menu "IPv6: Netfilter Configuration"
        depends on INET && IPV6 && NETFILTER
 
+config NF_DEFRAG_IPV6
+       tristate
+       default n
+
 config NF_CONNTRACK_IPV6
        tristate "IPv6 connection tracking support"
        depends on INET && IPV6 && NF_CONNTRACK
        default m if NETFILTER_ADVANCED=n
+       select NF_DEFRAG_IPV6
        ---help---
          Connection tracking keeps a record of what packets have passed
          through your machine, in order to figure out how they are related
index 3f8e4a3..0a432c9 100644 (file)
@@ -12,11 +12,14 @@ obj-$(CONFIG_IP6_NF_SECURITY) += ip6table_security.o
 
 # objects for l3 independent conntrack
 nf_conntrack_ipv6-objs  :=  nf_conntrack_l3proto_ipv6.o nf_conntrack_proto_icmpv6.o
-nf_defrag_ipv6-objs := nf_defrag_ipv6_hooks.o nf_conntrack_reasm.o
 
 # l3 independent conntrack
 obj-$(CONFIG_NF_CONNTRACK_IPV6) += nf_conntrack_ipv6.o nf_defrag_ipv6.o
 
+# defrag
+nf_defrag_ipv6-objs := nf_defrag_ipv6_hooks.o nf_conntrack_reasm.o
+obj-$(CONFIG_NF_DEFRAG_IPV6) += nf_defrag_ipv6.o
+
 # matches
 obj-$(CONFIG_IP6_NF_MATCH_AH) += ip6t_ah.o
 obj-$(CONFIG_IP6_NF_MATCH_EUI64) += ip6t_eui64.o
index 489d71b..3a3f129 100644 (file)
@@ -625,21 +625,24 @@ int nf_ct_frag6_init(void)
        inet_frags_init_net(&nf_init_frags);
        inet_frags_init(&nf_frags);
 
+#ifdef CONFIG_SYSCTL
        nf_ct_frag6_sysctl_header = register_sysctl_paths(nf_net_netfilter_sysctl_path,
                                                          nf_ct_frag6_sysctl_table);
        if (!nf_ct_frag6_sysctl_header) {
                inet_frags_fini(&nf_frags);
                return -ENOMEM;
        }
+#endif
 
        return 0;
 }
 
 void nf_ct_frag6_cleanup(void)
 {
+#ifdef CONFIG_SYSCTL
        unregister_sysctl_table(nf_ct_frag6_sysctl_header);
        nf_ct_frag6_sysctl_header = NULL;
-
+#endif
        inet_frags_fini(&nf_frags);
 
        nf_init_frags.low_thresh = 0;
index 9bb936a..9a7978f 100644 (file)
 #include <linux/spinlock.h>
 #include <net/protocol.h>
 
-const struct inet6_protocol *inet6_protos[MAX_INET_PROTOS] __read_mostly;
+const struct inet6_protocol __rcu *inet6_protos[MAX_INET_PROTOS] __read_mostly;
 
 int inet6_add_protocol(const struct inet6_protocol *prot, unsigned char protocol)
 {
        int hash = protocol & (MAX_INET_PROTOS - 1);
 
-       return !cmpxchg(&inet6_protos[hash], NULL, prot) ? 0 : -1;
+       return !cmpxchg((const struct inet6_protocol **)&inet6_protos[hash],
+                       NULL, prot) ? 0 : -1;
 }
 EXPORT_SYMBOL(inet6_add_protocol);
 
@@ -43,7 +44,8 @@ int inet6_del_protocol(const struct inet6_protocol *prot, unsigned char protocol
 {
        int ret, hash = protocol & (MAX_INET_PROTOS - 1);
 
-       ret = (cmpxchg(&inet6_protos[hash], prot, NULL) == prot) ? 0 : -1;
+       ret = (cmpxchg((const struct inet6_protocol **)&inet6_protos[hash],
+                      prot, NULL) == prot) ? 0 : -1;
 
        synchronize_net();
 
index 45e6efb..86c3952 100644 (file)
@@ -373,7 +373,7 @@ void raw6_icmp_error(struct sk_buff *skb, int nexthdr,
 
 static inline int rawv6_rcv_skb(struct sock * sk, struct sk_buff * skb)
 {
-       if ((raw6_sk(sk)->checksum || sk->sk_filter) &&
+       if ((raw6_sk(sk)->checksum || rcu_dereference_raw(sk->sk_filter)) &&
            skb_checksum_complete(skb)) {
                atomic_inc(&sk->sk_drops);
                kfree_skb(skb);
index 367a6cc..d6bfaec 100644 (file)
@@ -963,6 +963,7 @@ ipip6_tunnel_ioctl (struct net_device *dev, struct ifreq *ifr, int cmd)
                                }
                                t = netdev_priv(dev);
                                ipip6_tunnel_unlink(sitn, t);
+                               synchronize_net();
                                t->parms.iph.saddr = p.iph.saddr;
                                t->parms.iph.daddr = p.iph.daddr;
                                memcpy(dev->dev_addr, &p.iph.saddr, 4);
index d986472..4f3cec1 100644 (file)
 #include <net/protocol.h>
 #include <net/xfrm.h>
 
-static struct xfrm6_tunnel *tunnel6_handlers __read_mostly;
-static struct xfrm6_tunnel *tunnel46_handlers __read_mostly;
+static struct xfrm6_tunnel __rcu *tunnel6_handlers __read_mostly;
+static struct xfrm6_tunnel __rcu *tunnel46_handlers __read_mostly;
 static DEFINE_MUTEX(tunnel6_mutex);
 
 int xfrm6_tunnel_register(struct xfrm6_tunnel *handler, unsigned short family)
 {
-       struct xfrm6_tunnel **pprev;
+       struct xfrm6_tunnel __rcu **pprev;
+       struct xfrm6_tunnel *t;
        int ret = -EEXIST;
        int priority = handler->priority;
 
        mutex_lock(&tunnel6_mutex);
 
        for (pprev = (family == AF_INET6) ? &tunnel6_handlers : &tunnel46_handlers;
-            *pprev; pprev = &(*pprev)->next) {
-               if ((*pprev)->priority > priority)
+            (t = rcu_dereference_protected(*pprev,
+                       lockdep_is_held(&tunnel6_mutex))) != NULL;
+            pprev = &t->next) {
+               if (t->priority > priority)
                        break;
-               if ((*pprev)->priority == priority)
+               if (t->priority == priority)
                        goto err;
        }
 
@@ -65,14 +68,17 @@ EXPORT_SYMBOL(xfrm6_tunnel_register);
 
 int xfrm6_tunnel_deregister(struct xfrm6_tunnel *handler, unsigned short family)
 {
-       struct xfrm6_tunnel **pprev;
+       struct xfrm6_tunnel __rcu **pprev;
+       struct xfrm6_tunnel *t;
        int ret = -ENOENT;
 
        mutex_lock(&tunnel6_mutex);
 
        for (pprev = (family == AF_INET6) ? &tunnel6_handlers : &tunnel46_handlers;
-            *pprev; pprev = &(*pprev)->next) {
-               if (*pprev == handler) {
+            (t = rcu_dereference_protected(*pprev,
+                       lockdep_is_held(&tunnel6_mutex))) != NULL;
+            pprev = &t->next) {
+               if (t == handler) {
                        *pprev = handler->next;
                        ret = 0;
                        break;
index c84dad4..91def93 100644 (file)
@@ -527,7 +527,7 @@ int udpv6_queue_rcv_skb(struct sock * sk, struct sk_buff *skb)
                }
        }
 
-       if (sk->sk_filter) {
+       if (rcu_dereference_raw(sk->sk_filter)) {
                if (udp_lib_checksum_complete(skb))
                        goto drop;
        }
index 1712af1..c64ce0a 100644 (file)
@@ -111,6 +111,10 @@ struct l2tp_net {
        spinlock_t l2tp_session_hlist_lock;
 };
 
+static void l2tp_session_set_header_len(struct l2tp_session *session, int version);
+static void l2tp_tunnel_free(struct l2tp_tunnel *tunnel);
+static void l2tp_tunnel_closeall(struct l2tp_tunnel *tunnel);
+
 static inline struct l2tp_net *l2tp_pernet(struct net *net)
 {
        BUG_ON(!net);
@@ -118,6 +122,34 @@ static inline struct l2tp_net *l2tp_pernet(struct net *net)
        return net_generic(net, l2tp_net_id);
 }
 
+
+/* Tunnel reference counts. Incremented per session that is added to
+ * the tunnel.
+ */
+static inline void l2tp_tunnel_inc_refcount_1(struct l2tp_tunnel *tunnel)
+{
+       atomic_inc(&tunnel->ref_count);
+}
+
+static inline void l2tp_tunnel_dec_refcount_1(struct l2tp_tunnel *tunnel)
+{
+       if (atomic_dec_and_test(&tunnel->ref_count))
+               l2tp_tunnel_free(tunnel);
+}
+#ifdef L2TP_REFCNT_DEBUG
+#define l2tp_tunnel_inc_refcount(_t) do { \
+               printk(KERN_DEBUG "l2tp_tunnel_inc_refcount: %s:%d %s: cnt=%d\n", __func__, __LINE__, (_t)->name, atomic_read(&_t->ref_count)); \
+               l2tp_tunnel_inc_refcount_1(_t);                         \
+       } while (0)
+#define l2tp_tunnel_dec_refcount(_t) do { \
+               printk(KERN_DEBUG "l2tp_tunnel_dec_refcount: %s:%d %s: cnt=%d\n", __func__, __LINE__, (_t)->name, atomic_read(&_t->ref_count)); \
+               l2tp_tunnel_dec_refcount_1(_t);                         \
+       } while (0)
+#else
+#define l2tp_tunnel_inc_refcount(t) l2tp_tunnel_inc_refcount_1(t)
+#define l2tp_tunnel_dec_refcount(t) l2tp_tunnel_dec_refcount_1(t)
+#endif
+
 /* Session hash global list for L2TPv3.
  * The session_id SHOULD be random according to RFC3931, but several
  * L2TP implementations use incrementing session_ids.  So we do a real
@@ -699,8 +731,8 @@ EXPORT_SYMBOL(l2tp_recv_common);
  * Returns 1 if the packet was not a good data packet and could not be
  * forwarded.  All such packets are passed up to userspace to deal with.
  */
-int l2tp_udp_recv_core(struct l2tp_tunnel *tunnel, struct sk_buff *skb,
-                      int (*payload_hook)(struct sk_buff *skb))
+static int l2tp_udp_recv_core(struct l2tp_tunnel *tunnel, struct sk_buff *skb,
+                             int (*payload_hook)(struct sk_buff *skb))
 {
        struct l2tp_session *session = NULL;
        unsigned char *ptr, *optr;
@@ -812,7 +844,6 @@ error:
 
        return 1;
 }
-EXPORT_SYMBOL_GPL(l2tp_udp_recv_core);
 
 /* UDP encapsulation receive handler. See net/ipv4/udp.c.
  * Return codes:
@@ -922,7 +953,8 @@ static int l2tp_build_l2tpv3_header(struct l2tp_session *session, void *buf)
        return bufp - optr;
 }
 
-int l2tp_xmit_core(struct l2tp_session *session, struct sk_buff *skb, size_t data_len)
+static int l2tp_xmit_core(struct l2tp_session *session, struct sk_buff *skb,
+                         size_t data_len)
 {
        struct l2tp_tunnel *tunnel = session->tunnel;
        unsigned int len = skb->len;
@@ -970,7 +1002,6 @@ int l2tp_xmit_core(struct l2tp_session *session, struct sk_buff *skb, size_t dat
 
        return 0;
 }
-EXPORT_SYMBOL_GPL(l2tp_xmit_core);
 
 /* Automatically called when the skb is freed.
  */
@@ -1089,7 +1120,7 @@ EXPORT_SYMBOL_GPL(l2tp_xmit_skb);
  * The tunnel context is deleted only when all session sockets have been
  * closed.
  */
-void l2tp_tunnel_destruct(struct sock *sk)
+static void l2tp_tunnel_destruct(struct sock *sk)
 {
        struct l2tp_tunnel *tunnel;
 
@@ -1128,11 +1159,10 @@ void l2tp_tunnel_destruct(struct sock *sk)
 end:
        return;
 }
-EXPORT_SYMBOL(l2tp_tunnel_destruct);
 
 /* When the tunnel is closed, all the attached sessions need to go too.
  */
-void l2tp_tunnel_closeall(struct l2tp_tunnel *tunnel)
+static void l2tp_tunnel_closeall(struct l2tp_tunnel *tunnel)
 {
        int hash;
        struct hlist_node *walk;
@@ -1193,12 +1223,11 @@ again:
        }
        write_unlock_bh(&tunnel->hlist_lock);
 }
-EXPORT_SYMBOL_GPL(l2tp_tunnel_closeall);
 
 /* Really kill the tunnel.
  * Come here only when all sessions have been cleared from the tunnel.
  */
-void l2tp_tunnel_free(struct l2tp_tunnel *tunnel)
+static void l2tp_tunnel_free(struct l2tp_tunnel *tunnel)
 {
        struct l2tp_net *pn = l2tp_pernet(tunnel->l2tp_net);
 
@@ -1217,7 +1246,6 @@ void l2tp_tunnel_free(struct l2tp_tunnel *tunnel)
        atomic_dec(&l2tp_tunnel_count);
        kfree(tunnel);
 }
-EXPORT_SYMBOL_GPL(l2tp_tunnel_free);
 
 /* Create a socket for the tunnel, if one isn't set up by
  * userspace. This is used for static tunnels where there is no
@@ -1512,7 +1540,7 @@ EXPORT_SYMBOL_GPL(l2tp_session_delete);
 /* We come here whenever a session's send_seq, cookie_len or
  * l2specific_len parameters are set.
  */
-void l2tp_session_set_header_len(struct l2tp_session *session, int version)
+static void l2tp_session_set_header_len(struct l2tp_session *session, int version)
 {
        if (version == L2TP_HDR_VER_2) {
                session->hdr_len = 6;
@@ -1525,7 +1553,6 @@ void l2tp_session_set_header_len(struct l2tp_session *session, int version)
        }
 
 }
-EXPORT_SYMBOL_GPL(l2tp_session_set_header_len);
 
 struct l2tp_session *l2tp_session_create(int priv_size, struct l2tp_tunnel *tunnel, u32 session_id, u32 peer_session_id, struct l2tp_session_cfg *cfg)
 {
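
With the reference-count helpers made private to l2tp_core.c (the l2tp_core.h hunk below drops them along with the other no-longer-exported symbols), the pattern they implement is the ordinary one: one reference per attached session, and the tunnel freed on the final put. A compact sketch with a hypothetical object name:

    #include <linux/atomic.h>
    #include <linux/slab.h>

    /* Hypothetical tunnel: one reference per attached session,
     * freed when the last reference is dropped. */
    struct tun {
        atomic_t ref_count;
        /* ... */
    };

    static void tun_free(struct tun *t)
    {
        kfree(t);
    }

    static inline void tun_hold(struct tun *t)
    {
        atomic_inc(&t->ref_count);
    }

    static inline void tun_put(struct tun *t)
    {
        if (atomic_dec_and_test(&t->ref_count))
            tun_free(t);
    }

    static void session_attach(struct tun *t)
    {
        tun_hold(t);
        /* ... link the session into the tunnel ... */
    }

    static void session_detach(struct tun *t)
    {
        /* ... unlink the session ... */
        tun_put(t);     /* may free the tunnel if this was the last session */
    }
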
index f0f318e..a16a48e 100644 (file)
@@ -231,48 +231,15 @@ extern int l2tp_tunnel_create(struct net *net, int fd, int version, u32 tunnel_i
 extern int l2tp_tunnel_delete(struct l2tp_tunnel *tunnel);
 extern struct l2tp_session *l2tp_session_create(int priv_size, struct l2tp_tunnel *tunnel, u32 session_id, u32 peer_session_id, struct l2tp_session_cfg *cfg);
 extern int l2tp_session_delete(struct l2tp_session *session);
-extern void l2tp_tunnel_free(struct l2tp_tunnel *tunnel);
 extern void l2tp_session_free(struct l2tp_session *session);
 extern void l2tp_recv_common(struct l2tp_session *session, struct sk_buff *skb, unsigned char *ptr, unsigned char *optr, u16 hdrflags, int length, int (*payload_hook)(struct sk_buff *skb));
-extern int l2tp_udp_recv_core(struct l2tp_tunnel *tunnel, struct sk_buff *skb, int (*payload_hook)(struct sk_buff *skb));
 extern int l2tp_udp_encap_recv(struct sock *sk, struct sk_buff *skb);
 
-extern int l2tp_xmit_core(struct l2tp_session *session, struct sk_buff *skb, size_t data_len);
 extern int l2tp_xmit_skb(struct l2tp_session *session, struct sk_buff *skb, int hdr_len);
-extern void l2tp_tunnel_destruct(struct sock *sk);
-extern void l2tp_tunnel_closeall(struct l2tp_tunnel *tunnel);
-extern void l2tp_session_set_header_len(struct l2tp_session *session, int version);
 
 extern int l2tp_nl_register_ops(enum l2tp_pwtype pw_type, const struct l2tp_nl_cmd_ops *ops);
 extern void l2tp_nl_unregister_ops(enum l2tp_pwtype pw_type);
 
-/* Tunnel reference counts. Incremented per session that is added to
- * the tunnel.
- */
-static inline void l2tp_tunnel_inc_refcount_1(struct l2tp_tunnel *tunnel)
-{
-       atomic_inc(&tunnel->ref_count);
-}
-
-static inline void l2tp_tunnel_dec_refcount_1(struct l2tp_tunnel *tunnel)
-{
-       if (atomic_dec_and_test(&tunnel->ref_count))
-               l2tp_tunnel_free(tunnel);
-}
-#ifdef L2TP_REFCNT_DEBUG
-#define l2tp_tunnel_inc_refcount(_t) do { \
-               printk(KERN_DEBUG "l2tp_tunnel_inc_refcount: %s:%d %s: cnt=%d\n", __func__, __LINE__, (_t)->name, atomic_read(&_t->ref_count)); \
-               l2tp_tunnel_inc_refcount_1(_t);                         \
-       } while (0)
-#define l2tp_tunnel_dec_refcount(_t) do { \
-               printk(KERN_DEBUG "l2tp_tunnel_dec_refcount: %s:%d %s: cnt=%d\n", __func__, __LINE__, (_t)->name, atomic_read(&_t->ref_count)); \
-               l2tp_tunnel_dec_refcount_1(_t);                         \
-       } while (0)
-#else
-#define l2tp_tunnel_inc_refcount(t) l2tp_tunnel_inc_refcount_1(t)
-#define l2tp_tunnel_dec_refcount(t) l2tp_tunnel_dec_refcount_1(t)
-#endif
-
 /* Session reference counts. Incremented when code obtains a reference
  * to a session.
  */
index 1c770c0..0bf6a59 100644 (file)
@@ -576,7 +576,7 @@ out:
        return copied;
 }
 
-struct proto l2tp_ip_prot = {
+static struct proto l2tp_ip_prot = {
        .name              = "L2TP/IP",
        .owner             = THIS_MODULE,
        .init              = l2tp_ip_open,
index ff60c02..239c483 100644 (file)
@@ -456,6 +456,7 @@ struct sta_info *ieee80211_ibss_add_sta(struct ieee80211_sub_if_data *sdata,
        if (!sta)
                return NULL;
 
+       sta->last_rx = jiffies;
        set_sta_flags(sta, WLAN_STA_AUTHORIZED);
 
        /* make sure mandatory rates are always added */
index 22bc42b..6b322fa 100644 (file)
@@ -748,7 +748,7 @@ int ieee80211_register_hw(struct ieee80211_hw *hw)
                hw->queues = IEEE80211_MAX_QUEUES;
 
        local->workqueue =
-               create_singlethread_workqueue(wiphy_name(local->hw.wiphy));
+               alloc_ordered_workqueue(wiphy_name(local->hw.wiphy), 0);
        if (!local->workqueue) {
                result = -ENOMEM;
                goto fail_workqueue;
@@ -962,12 +962,6 @@ static void __exit ieee80211_exit(void)
        rc80211_minstrel_ht_exit();
        rc80211_minstrel_exit();
 
-       /*
-        * For key todo, it'll be empty by now but the work
-        * might still be scheduled.
-        */
-       flush_scheduled_work();
-
        if (mesh_allocated)
                ieee80211s_stop();
 
index 809cf23..33f7699 100644 (file)
@@ -329,6 +329,9 @@ void rate_control_get_rate(struct ieee80211_sub_if_data *sdata,
                 * if needed.
                 */
                for (i = 0; i < IEEE80211_TX_MAX_RATES; i++) {
+                       /* Skip invalid rates */
+                       if (info->control.rates[i].idx < 0)
+                               break;
                        /* Rate masking supports only legacy rates for now */
                        if (info->control.rates[i].flags & IEEE80211_TX_RC_MCS)
                                continue;
index 4328825..1534f2b 100644 (file)
@@ -525,6 +525,7 @@ config NETFILTER_XT_TARGET_TPROXY
        depends on NETFILTER_XTABLES
        depends on NETFILTER_ADVANCED
        select NF_DEFRAG_IPV4
+       select NF_DEFRAG_IPV6 if IP6_NF_IPTABLES
        help
          This option adds a `TPROXY' target, which is somewhat similar to
          REDIRECT.  It can only be used in the mangle table and is useful
@@ -927,6 +928,7 @@ config NETFILTER_XT_MATCH_SOCKET
        depends on NETFILTER_ADVANCED
        depends on !NF_CONNTRACK || NF_CONNTRACK
        select NF_DEFRAG_IPV4
+       select NF_DEFRAG_IPV6 if IP6_NF_IPTABLES
        help
          This option adds a `socket' match, which can be used to match
          packets for which a TCP or UDP socket lookup finds a valid socket.
index 19c482c..640678f 100644 (file)
@@ -21,7 +21,9 @@
 #include <linux/netfilter_ipv4/ip_tables.h>
 
 #include <net/netfilter/ipv4/nf_defrag_ipv4.h>
-#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
+
+#if defined(CONFIG_IP6_NF_IPTABLES) || defined(CONFIG_IP6_NF_IPTABLES_MODULE)
+#define XT_TPROXY_HAVE_IPV6 1
 #include <net/if_inet6.h>
 #include <net/addrconf.h>
 #include <linux/netfilter_ipv6/ip6_tables.h>
@@ -172,7 +174,7 @@ tproxy_tg4_v1(struct sk_buff *skb, const struct xt_action_param *par)
        return tproxy_tg4(skb, tgi->laddr.ip, tgi->lport, tgi->mark_mask, tgi->mark_value);
 }
 
-#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
+#ifdef XT_TPROXY_HAVE_IPV6
 
 static inline const struct in6_addr *
 tproxy_laddr6(struct sk_buff *skb, const struct in6_addr *user_laddr,
@@ -372,7 +374,7 @@ static struct xt_target tproxy_tg_reg[] __read_mostly = {
                .hooks          = 1 << NF_INET_PRE_ROUTING,
                .me             = THIS_MODULE,
        },
-#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
+#ifdef XT_TPROXY_HAVE_IPV6
        {
                .name           = "TPROXY",
                .family         = NFPROTO_IPV6,
@@ -391,7 +393,7 @@ static struct xt_target tproxy_tg_reg[] __read_mostly = {
 static int __init tproxy_tg_init(void)
 {
        nf_defrag_ipv4_enable();
-#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
+#ifdef XT_TPROXY_HAVE_IPV6
        nf_defrag_ipv6_enable();
 #endif
 
index 2dbd4c8..d94a858 100644 (file)
@@ -14,7 +14,6 @@
 #include <linux/skbuff.h>
 #include <linux/netfilter/x_tables.h>
 #include <linux/netfilter_ipv4/ip_tables.h>
-#include <linux/netfilter_ipv6/ip6_tables.h>
 #include <net/tcp.h>
 #include <net/udp.h>
 #include <net/icmp.h>
 #include <net/inet_sock.h>
 #include <net/netfilter/nf_tproxy_core.h>
 #include <net/netfilter/ipv4/nf_defrag_ipv4.h>
+
+#if defined(CONFIG_IP6_NF_IPTABLES) || defined(CONFIG_IP6_NF_IPTABLES_MODULE)
+#define XT_SOCKET_HAVE_IPV6 1
+#include <linux/netfilter_ipv6/ip6_tables.h>
 #include <net/netfilter/ipv6/nf_defrag_ipv6.h>
+#endif
 
 #include <linux/netfilter/xt_socket.h>
 
@@ -186,7 +190,7 @@ socket_mt4_v1(const struct sk_buff *skb, struct xt_action_param *par)
        return socket_match(skb, par, par->matchinfo);
 }
 
-#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
+#ifdef XT_SOCKET_HAVE_IPV6
 
 static int
 extract_icmp6_fields(const struct sk_buff *skb,
@@ -331,7 +335,7 @@ static struct xt_match socket_mt_reg[] __read_mostly = {
                                  (1 << NF_INET_LOCAL_IN),
                .me             = THIS_MODULE,
        },
-#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
+#ifdef XT_SOCKET_HAVE_IPV6
        {
                .name           = "socket",
                .revision       = 1,
@@ -348,7 +352,7 @@ static struct xt_match socket_mt_reg[] __read_mostly = {
 static int __init socket_mt_init(void)
 {
        nf_defrag_ipv4_enable();
-#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
+#ifdef XT_SOCKET_HAVE_IPV6
        nf_defrag_ipv6_enable();
 #endif
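
xt_TPROXY and xt_socket now gate their IPv6 paths on CONFIG_IP6_NF_IPTABLES rather than CONFIG_IPV6, defining a single HAVE_IPV6 macro up front and testing only that afterwards; together with the `select NF_DEFRAG_IPV6 if IP6_NF_IPTABLES` lines added to the netfilter Kconfig, the IPv6 defrag hooks are only referenced when they can actually be built. A minimal module skeleton showing the guard-macro shape, with hypothetical names:

    #include <linux/init.h>
    #include <linux/kernel.h>
    #include <linux/module.h>

    #if defined(CONFIG_IP6_NF_IPTABLES) || defined(CONFIG_IP6_NF_IPTABLES_MODULE)
    #define MYMOD_HAVE_IPV6 1
    #endif

    static int __init mymod_init(void)
    {
        pr_info("mymod: IPv4 support enabled\n");
    #ifdef MYMOD_HAVE_IPV6
        /* IPv6-only setup lives behind the one feature macro, so the
         * CONFIG test is written exactly once. */
        pr_info("mymod: IPv6 support enabled\n");
    #endif
        return 0;
    }

    static void __exit mymod_exit(void)
    {
    }

    module_init(mymod_init);
    module_exit(mymod_exit);
    MODULE_LICENSE("GPL");
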
 
index cd96ed3..478181d 100644 (file)
@@ -83,9 +83,9 @@ struct netlink_sock {
        struct module           *module;
 };
 
-struct listeners_rcu_head {
-       struct rcu_head rcu_head;
-       void *ptr;
+struct listeners {
+       struct rcu_head         rcu;
+       unsigned long           masks[0];
 };
 
 #define NETLINK_KERNEL_SOCKET  0x1
@@ -119,7 +119,7 @@ struct nl_pid_hash {
 struct netlink_table {
        struct nl_pid_hash hash;
        struct hlist_head mc_list;
-       unsigned long *listeners;
+       struct listeners __rcu *listeners;
        unsigned int nl_nonroot;
        unsigned int groups;
        struct mutex *cb_mutex;
@@ -338,7 +338,7 @@ netlink_update_listeners(struct sock *sk)
                        if (i < NLGRPLONGS(nlk_sk(sk)->ngroups))
                                mask |= nlk_sk(sk)->groups[i];
                }
-               tbl->listeners[i] = mask;
+               tbl->listeners->masks[i] = mask;
        }
        /* this function is only called with the netlink table "grabbed", which
         * makes sure updates are visible before bind or setsockopt return. */
@@ -936,7 +936,7 @@ EXPORT_SYMBOL(netlink_unicast);
 int netlink_has_listeners(struct sock *sk, unsigned int group)
 {
        int res = 0;
-       unsigned long *listeners;
+       struct listeners *listeners;
 
        BUG_ON(!netlink_is_kernel(sk));
 
@@ -944,7 +944,7 @@ int netlink_has_listeners(struct sock *sk, unsigned int group)
        listeners = rcu_dereference(nl_table[sk->sk_protocol].listeners);
 
        if (group - 1 < nl_table[sk->sk_protocol].groups)
-               res = test_bit(group - 1, listeners);
+               res = test_bit(group - 1, listeners->masks);
 
        rcu_read_unlock();
 
@@ -1498,7 +1498,7 @@ netlink_kernel_create(struct net *net, int unit, unsigned int groups,
        struct socket *sock;
        struct sock *sk;
        struct netlink_sock *nlk;
-       unsigned long *listeners = NULL;
+       struct listeners *listeners = NULL;
 
        BUG_ON(!nl_table);
 
@@ -1523,8 +1523,7 @@ netlink_kernel_create(struct net *net, int unit, unsigned int groups,
        if (groups < 32)
                groups = 32;
 
-       listeners = kzalloc(NLGRPSZ(groups) + sizeof(struct listeners_rcu_head),
-                           GFP_KERNEL);
+       listeners = kzalloc(sizeof(*listeners) + NLGRPSZ(groups), GFP_KERNEL);
        if (!listeners)
                goto out_sock_release;
 
@@ -1541,7 +1540,7 @@ netlink_kernel_create(struct net *net, int unit, unsigned int groups,
        netlink_table_grab();
        if (!nl_table[unit].registered) {
                nl_table[unit].groups = groups;
-               nl_table[unit].listeners = listeners;
+               rcu_assign_pointer(nl_table[unit].listeners, listeners);
                nl_table[unit].cb_mutex = cb_mutex;
                nl_table[unit].module = module;
                nl_table[unit].registered = 1;
@@ -1572,43 +1571,28 @@ netlink_kernel_release(struct sock *sk)
 EXPORT_SYMBOL(netlink_kernel_release);
 
 
-static void netlink_free_old_listeners(struct rcu_head *rcu_head)
+static void listeners_free_rcu(struct rcu_head *head)
 {
-       struct listeners_rcu_head *lrh;
-
-       lrh = container_of(rcu_head, struct listeners_rcu_head, rcu_head);
-       kfree(lrh->ptr);
+       kfree(container_of(head, struct listeners, rcu));
 }
 
 int __netlink_change_ngroups(struct sock *sk, unsigned int groups)
 {
-       unsigned long *listeners, *old = NULL;
-       struct listeners_rcu_head *old_rcu_head;
+       struct listeners *new, *old;
        struct netlink_table *tbl = &nl_table[sk->sk_protocol];
 
        if (groups < 32)
                groups = 32;
 
        if (NLGRPSZ(tbl->groups) < NLGRPSZ(groups)) {
-               listeners = kzalloc(NLGRPSZ(groups) +
-                                   sizeof(struct listeners_rcu_head),
-                                   GFP_ATOMIC);
-               if (!listeners)
+               new = kzalloc(sizeof(*new) + NLGRPSZ(groups), GFP_ATOMIC);
+               if (!new)
                        return -ENOMEM;
-               old = tbl->listeners;
-               memcpy(listeners, old, NLGRPSZ(tbl->groups));
-               rcu_assign_pointer(tbl->listeners, listeners);
-               /*
-                * Free the old memory after an RCU grace period so we
-                * don't leak it. We use call_rcu() here in order to be
-                * able to call this function from atomic contexts. The
-                * allocation of this memory will have reserved enough
-                * space for struct listeners_rcu_head at the end.
-                */
-               old_rcu_head = (void *)(tbl->listeners +
-                                       NLGRPLONGS(tbl->groups));
-               old_rcu_head->ptr = old;
-               call_rcu(&old_rcu_head->rcu_head, netlink_free_old_listeners);
+               old = rcu_dereference_raw(tbl->listeners);
+               memcpy(new->masks, old->masks, NLGRPSZ(tbl->groups));
+               rcu_assign_pointer(tbl->listeners, new);
+
+               call_rcu(&old->rcu, listeners_free_rcu);
        }
        tbl->groups = groups;
 
@@ -2104,18 +2088,17 @@ static void __net_exit netlink_net_exit(struct net *net)
 
 static void __init netlink_add_usersock_entry(void)
 {
-       unsigned long *listeners;
+       struct listeners *listeners;
        int groups = 32;
 
-       listeners = kzalloc(NLGRPSZ(groups) + sizeof(struct listeners_rcu_head),
-                           GFP_KERNEL);
+       listeners = kzalloc(sizeof(*listeners) + NLGRPSZ(groups), GFP_KERNEL);
        if (!listeners)
-               panic("netlink_add_usersock_entry: Cannot allocate listneres\n");
+               panic("netlink_add_usersock_entry: Cannot allocate listeners\n");
 
        netlink_table_grab();
 
        nl_table[NETLINK_USERSOCK].groups = groups;
-       nl_table[NETLINK_USERSOCK].listeners = listeners;
+       rcu_assign_pointer(nl_table[NETLINK_USERSOCK].listeners, listeners);
        nl_table[NETLINK_USERSOCK].module = THIS_MODULE;
        nl_table[NETLINK_USERSOCK].registered = 1;
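
The netlink rework folds the old listeners_rcu_head bolt-on into a self-describing struct listeners, so resizing the per-protocol groups bitmap becomes a textbook RCU replace: allocate the bigger object, copy, rcu_assign_pointer() the new one in, call_rcu() the old one away. A sketch of that replace plus the matching reader, for a hypothetical bitmap object whose writers are serialized externally (netlink_table_grab() plays that role in the real code):

    #include <linux/bitops.h>
    #include <linux/errno.h>
    #include <linux/rcupdate.h>
    #include <linux/slab.h>
    #include <linux/string.h>
    #include <linux/types.h>

    struct bitmap_rcu {
        struct rcu_head rcu;
        size_t nlongs;
        unsigned long masks[0];     /* flexible array, sized at allocation */
    };

    static struct bitmap_rcu __rcu *cur_bitmap;

    static void bitmap_free_rcu(struct rcu_head *head)
    {
        kfree(container_of(head, struct bitmap_rcu, rcu));
    }

    /* Grow the bitmap to @nlongs words; the caller must serialize writers. */
    static int bitmap_grow(size_t nlongs)
    {
        struct bitmap_rcu *new, *old;

        new = kzalloc(sizeof(*new) + nlongs * sizeof(unsigned long), GFP_ATOMIC);
        if (!new)
            return -ENOMEM;

        old = rcu_dereference_protected(cur_bitmap, 1); /* writer-serialized */
        if (old)
            memcpy(new->masks, old->masks, old->nlongs * sizeof(unsigned long));
        new->nlongs = nlongs;
        rcu_assign_pointer(cur_bitmap, new);
        if (old)
            call_rcu(&old->rcu, bitmap_free_rcu);   /* free after a grace period */
        return 0;
    }

    /* Reader side mirrors netlink_has_listeners(). */
    static bool bitmap_test(unsigned int bit)
    {
        struct bitmap_rcu *b;
        bool ret = false;

        rcu_read_lock();
        b = rcu_dereference(cur_bitmap);
        if (b && bit < b->nlongs * BITS_PER_LONG)
            ret = test_bit(bit, b->masks);
        rcu_read_unlock();
        return ret;
    }
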
 
index d14bbf9..4b9f891 100644 (file)
@@ -1167,7 +1167,7 @@ static int ignore_request(struct wiphy *wiphy,
                                return 0;
                        return -EALREADY;
                }
-               return REG_INTERSECT;
+               return 0;
        case NL80211_REGDOM_SET_BY_DRIVER:
                if (last_request->initiator == NL80211_REGDOM_SET_BY_CORE) {
                        if (regdom_changes(pending_request->alpha2))
index 51a3d38..9890cf2 100644 (file)
@@ -1721,7 +1721,6 @@ printk (KERN_INFO "FKS: es_rec_set_recmask mask = %x\n", mask);
                                left  = value & 0x000000ff;
                                right = (value & 0x0000ff00) >> 8;
                        } else {                                /* Turn it off (3)  */
-                               left  = 0;
                                left  = 0;
                                right = 0;
                        }
index 82ebeb9..93fa59c 100644 (file)
@@ -5326,6 +5326,82 @@ again:
        return 0;
 }
 
+static int stac92hd83xxx_set_system_btl_amp(struct hda_codec *codec)
+{
+       if (codec->vendor_id != 0x111d7605 &&
+           codec->vendor_id != 0x111d76d1)
+               return 0;
+
+       switch (codec->subsystem_id) {
+       case 0x103c1618:
+       case 0x103c1619:
+       case 0x103c161a:
+       case 0x103c161b:
+       case 0x103c161c:
+       case 0x103c161d:
+       case 0x103c161e:
+       case 0x103c161f:
+       case 0x103c1620:
+       case 0x103c1621:
+       case 0x103c1622:
+       case 0x103c1623:
+
+       case 0x103c162a:
+       case 0x103c162b:
+
+       case 0x103c1630:
+       case 0x103c1631:
+
+       case 0x103c1633:
+
+       case 0x103c1635:
+
+       case 0x103c164f:
+
+       case 0x103c1676:
+       case 0x103c1677:
+       case 0x103c1678:
+       case 0x103c1679:
+       case 0x103c167a:
+       case 0x103c167b:
+       case 0x103c167c:
+       case 0x103c167d:
+       case 0x103c167e:
+       case 0x103c167f:
+       case 0x103c1680:
+       case 0x103c1681:
+       case 0x103c1682:
+       case 0x103c1683:
+       case 0x103c1684:
+       case 0x103c1685:
+       case 0x103c1686:
+       case 0x103c1687:
+       case 0x103c1688:
+       case 0x103c1689:
+       case 0x103c168a:
+       case 0x103c168b:
+       case 0x103c168c:
+       case 0x103c168d:
+       case 0x103c168e:
+       case 0x103c168f:
+       case 0x103c1690:
+       case 0x103c1691:
+       case 0x103c1692:
+
+       case 0x103c3587:
+       case 0x103c3588:
+       case 0x103c3589:
+       case 0x103c358a:
+
+       case 0x103c3667:
+       case 0x103c3668:
+               /* set BTL amp level to 13.43dB for louder speaker output */
+               return snd_hda_codec_write_cache(codec, codec->afg, 0,
+                                                0x7F4, 0x14);
+       }
+       return 0;
+}
+
 static int patch_stac92hd83xxx(struct hda_codec *codec)
 {
        struct sigmatel_spec *spec;
@@ -5452,6 +5528,8 @@ again:
                        AC_VERB_SET_CONNECT_SEL, num_dacs);
        }
 
+       stac92hd83xxx_set_system_btl_amp(codec);
+
        codec->proc_widget_hook = stac92hd_proc_hook;
 
        return 0;
index c53955f..de799cd 100644 (file)
@@ -47,7 +47,7 @@ static int ad73311_probe(struct platform_device *pdev)
                        &soc_codec_dev_ad73311, &ad73311_dai, 1);
 }
 
-static int ad73311_remove(struct platform_device *pdev)
+static int __devexit ad73311_remove(struct platform_device *pdev)
 {
        snd_soc_unregister_codec(&pdev->dev);
        return 0;
index e7a40d1..bc22ee9 100644 (file)
@@ -2051,7 +2051,7 @@ static int max98088_i2c_probe(struct i2c_client *i2c,
        return ret;
 }
 
-static int max98088_i2c_remove(struct i2c_client *client)
+static int __devexit max98088_i2c_remove(struct i2c_client *client)
 {
        snd_soc_unregister_codec(&client->dev);
        kfree(i2c_get_clientdata(client));
index 7a18254..99c046b 100644 (file)
@@ -665,7 +665,7 @@ static int wm9090_i2c_probe(struct i2c_client *i2c,
        return ret;
 }
 
-static int wm9090_i2c_remove(struct i2c_client *i2c)
+static int __devexit wm9090_i2c_remove(struct i2c_client *i2c)
 {
        struct wm9090_priv *wm9090 = i2c_get_clientdata(i2c);
 
index fe15bb2..25f27ec 100644 (file)
@@ -24,7 +24,6 @@
 #include <sound/pcm_params.h>
 #include <sound/initval.h>
 #include <sound/soc.h>
-#include <sound/soc-of-simple.h>
 
 #include "mpc5200_dma.h"
 #include "mpc5200_psc_ac97.h"
@@ -49,7 +48,7 @@ static struct snd_soc_dai_link pcm030_fabric_dai[] = {
        .codec_dai_name = "wm9712-aux",
        .cpu_dai_name = "mpc5200-psc-ac97.1",
        .platform_name = "mpc5200-pcm-audio",
-       ..codec_name = "wm9712-codec",
+       .codec_name = "wm9712-codec",
 },
 };
 
index 1febf2f..ae4251d 100644 (file)
@@ -62,12 +62,14 @@ struct snd_usb_substream {
        unsigned int syncinterval;  /* P for adaptive mode, 0 otherwise */
        unsigned int freqn;      /* nominal sampling rate in fs/fps in Q16.16 format */
        unsigned int freqm;      /* momentary sampling rate in fs/fps in Q16.16 format */
+       int          freqshift;  /* how much to shift the feedback value to get Q16.16 */
        unsigned int freqmax;    /* maximum sampling rate, used for buffer management */
        unsigned int phase;      /* phase accumulator */
        unsigned int maxpacksize;       /* max packet size in bytes */
        unsigned int maxframesize;      /* max packet size in frames */
        unsigned int curpacksize;       /* current packet size in bytes (for capture) */
        unsigned int curframesize;      /* current packet size in frames (for capture) */
+       unsigned int syncmaxsize;       /* sync endpoint packet size */
        unsigned int fill_max: 1;       /* fill max packet size always */
        unsigned int txfr_quirk:1;      /* allow sub-frame alignment */
        unsigned int fmt_type;          /* USB audio format type (1-3) */
index f49756c..cff3a3c 100644 (file)
@@ -237,6 +237,7 @@ static int set_format(struct snd_usb_substream *subs, struct audioformat *fmt)
        subs->datainterval = fmt->datainterval;
        subs->syncpipe = subs->syncinterval = 0;
        subs->maxpacksize = fmt->maxpacksize;
+       subs->syncmaxsize = 0;
        subs->fill_max = 0;
 
        /* we need a sync pipe in async OUT or adaptive IN mode */
@@ -283,6 +284,7 @@ static int set_format(struct snd_usb_substream *subs, struct audioformat *fmt)
                        subs->syncinterval = get_endpoint(alts, 1)->bInterval - 1;
                else
                        subs->syncinterval = 3;
+               subs->syncmaxsize = le16_to_cpu(get_endpoint(alts, 1)->wMaxPacketSize);
        }
 
        /* always fill max packet size */
index 3c650ab..961c9a2 100644 (file)
@@ -132,6 +132,11 @@ static void proc_dump_substream_status(struct snd_usb_substream *subs, struct sn
                            ? get_full_speed_hz(subs->freqm)
                            : get_high_speed_hz(subs->freqm),
                            subs->freqm >> 16, subs->freqm & 0xffff);
+               if (subs->freqshift != INT_MIN)
+                       snd_iprintf(buffer, "    Feedback Format = %d.%d\n",
+                                   (subs->syncmaxsize > 3 ? 32 : 24)
+                                               - (16 - subs->freqshift),
+                                   16 - subs->freqshift);
        } else {
                snd_iprintf(buffer, "  Status: Stop\n");
        }
index 8deeaad..e184349 100644 (file)
@@ -225,6 +225,7 @@ int snd_usb_init_substream_urbs(struct snd_usb_substream *subs,
        else
                subs->freqn = get_usb_high_speed_rate(rate);
        subs->freqm = subs->freqn;
+       subs->freqshift = INT_MIN;
        /* calculate max. frequency */
        if (subs->maxpacksize) {
                /* whatever fits into a max. size packet */
@@ -513,11 +514,10 @@ static int retire_paused_capture_urb(struct snd_usb_substream *subs,
 
 
 /*
- * prepare urb for full speed playback sync pipe
+ * prepare urb for playback sync pipe
  *
  * set up the offset and length to receive the current frequency.
  */
-
 static int prepare_playback_sync_urb(struct snd_usb_substream *subs,
                                     struct snd_pcm_runtime *runtime,
                                     struct urb *urb)
@@ -525,103 +525,78 @@ static int prepare_playback_sync_urb(struct snd_usb_substream *subs,
        struct snd_urb_ctx *ctx = urb->context;
 
        urb->dev = ctx->subs->dev; /* we need to set this at each time */
-       urb->iso_frame_desc[0].length = 3;
+       urb->iso_frame_desc[0].length = min(4u, ctx->subs->syncmaxsize);
        urb->iso_frame_desc[0].offset = 0;
        return 0;
 }
 
 /*
- * prepare urb for high speed playback sync pipe
+ * process after playback sync complete
  *
- * set up the offset and length to receive the current frequency.
- */
-
-static int prepare_playback_sync_urb_hs(struct snd_usb_substream *subs,
-                                       struct snd_pcm_runtime *runtime,
-                                       struct urb *urb)
-{
-       struct snd_urb_ctx *ctx = urb->context;
-
-       urb->dev = ctx->subs->dev; /* we need to set this at each time */
-       urb->iso_frame_desc[0].length = 4;
-       urb->iso_frame_desc[0].offset = 0;
-       return 0;
-}
-
-/*
- * process after full speed playback sync complete
- *
- * retrieve the current 10.14 frequency from pipe, and set it.
- * the value is referred in prepare_playback_urb().
+ * Full speed devices report feedback values in 10.14 format as samples per
+ * frame, high speed devices in 16.16 format as samples per microframe.
+ * Because the Audio Class 1 spec was written before USB 2.0, many high speed
+ * devices use a wrong interpretation, some others use an entirely different
+ * format.  Therefore, we cannot predict what format any particular device uses
+ * and must detect it automatically.
  */
 static int retire_playback_sync_urb(struct snd_usb_substream *subs,
                                    struct snd_pcm_runtime *runtime,
                                    struct urb *urb)
 {
        unsigned int f;
+       int shift;
        unsigned long flags;
 
-       if (urb->iso_frame_desc[0].status == 0 &&
-           urb->iso_frame_desc[0].actual_length == 3) {
-               f = combine_triple((u8*)urb->transfer_buffer) << 2;
-               if (f >= subs->freqn - subs->freqn / 8 && f <= subs->freqmax) {
-                       spin_lock_irqsave(&subs->lock, flags);
-                       subs->freqm = f;
-                       spin_unlock_irqrestore(&subs->lock, flags);
-               }
-       }
-
-       return 0;
-}
+       if (urb->iso_frame_desc[0].status != 0 ||
+           urb->iso_frame_desc[0].actual_length < 3)
+               return 0;
 
-/*
- * process after high speed playback sync complete
- *
- * retrieve the current 12.13 frequency from pipe, and set it.
- * the value is referred in prepare_playback_urb().
- */
-static int retire_playback_sync_urb_hs(struct snd_usb_substream *subs,
-                                      struct snd_pcm_runtime *runtime,
-                                      struct urb *urb)
-{
-       unsigned int f;
-       unsigned long flags;
+       f = le32_to_cpup(urb->transfer_buffer);
+       if (urb->iso_frame_desc[0].actual_length == 3)
+               f &= 0x00ffffff;
+       else
+               f &= 0x0fffffff;
+       if (f == 0)
+               return 0;
 
-       if (urb->iso_frame_desc[0].status == 0 &&
-           urb->iso_frame_desc[0].actual_length == 4) {
-               f = combine_quad((u8*)urb->transfer_buffer) & 0x0fffffff;
-               if (f >= subs->freqn - subs->freqn / 8 && f <= subs->freqmax) {
-                       spin_lock_irqsave(&subs->lock, flags);
-                       subs->freqm = f;
-                       spin_unlock_irqrestore(&subs->lock, flags);
+       if (unlikely(subs->freqshift == INT_MIN)) {
+               /*
+                * The first time we see a feedback value, determine its format
+                * by shifting it left or right until it matches the nominal
+                * frequency value.  This assumes that the feedback does not
+                * differ from the nominal value more than +50% or -25%.
+                */
+               shift = 0;
+               while (f < subs->freqn - subs->freqn / 4) {
+                       f <<= 1;
+                       shift++;
+               }
+               while (f > subs->freqn + subs->freqn / 2) {
+                       f >>= 1;
+                       shift--;
                }
+               subs->freqshift = shift;
        }
+       else if (subs->freqshift >= 0)
+               f <<= subs->freqshift;
+       else
+               f >>= -subs->freqshift;
 
-       return 0;
-}
-
-/*
- * process after E-Mu 0202/0404/Tracker Pre high speed playback sync complete
- *
- * These devices return the number of samples per packet instead of the number
- * of samples per microframe.
- */
-static int retire_playback_sync_urb_hs_emu(struct snd_usb_substream *subs,
-                                          struct snd_pcm_runtime *runtime,
-                                          struct urb *urb)
-{
-       unsigned int f;
-       unsigned long flags;
-
-       if (urb->iso_frame_desc[0].status == 0 &&
-           urb->iso_frame_desc[0].actual_length == 4) {
-               f = combine_quad((u8*)urb->transfer_buffer) & 0x0fffffff;
-               f >>= subs->datainterval;
-               if (f >= subs->freqn - subs->freqn / 8 && f <= subs->freqmax) {
-                       spin_lock_irqsave(&subs->lock, flags);
-                       subs->freqm = f;
-                       spin_unlock_irqrestore(&subs->lock, flags);
-               }
+       if (likely(f >= subs->freqn - subs->freqn / 8 && f <= subs->freqmax)) {
+               /*
+                * If the frequency looks valid, set it.
+                * This value is referred to in prepare_playback_urb().
+                */
+               spin_lock_irqsave(&subs->lock, flags);
+               subs->freqm = f;
+               spin_unlock_irqrestore(&subs->lock, flags);
+       } else {
+               /*
+                * Out of range; maybe the shift value is wrong.
+                * Reset it so that we autodetect again the next time.
+                */
+               subs->freqshift = INT_MIN;
        }
 
        return 0;
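
The block comment in this hunk carries the idea: instead of hard-coding 10.14 for full speed, 16.16 for high speed and per-device quirks on top, the driver now infers the feedback format from the first value it sees by shifting it until it falls within -25%/+50% of the nominal rate, and caches the shift in subs->freqshift (resetting it to INT_MIN whenever a later value looks implausible). A stand-alone plain-C sketch of that normalization with hypothetical names and the same window; the driver additionally discards f == 0 before doing this:

    #include <stdio.h>

    /* Normalize a raw feedback value of unknown fixed-point format to Q16.16,
     * given the nominal rate freqn (already Q16.16, samples per (micro)frame).
     * Also reports the shift so it can be cached, as the driver does. */
    static unsigned int normalize_feedback(unsigned int f, unsigned int freqn,
                                           int *shift)
    {
        int s = 0;

        while (f < freqn - freqn / 4) {     /* more than 25% below nominal */
            f <<= 1;
            s++;
        }
        while (f > freqn + freqn / 2) {     /* more than 50% above nominal */
            f >>= 1;
            s--;
        }
        *shift = s;
        return f;
    }

    int main(void)
    {
        /* 48 kHz at full speed: nominally 48 samples per 1 ms frame (Q16.16). */
        unsigned int freqn = 48 << 16;
        int shift;
        /* A device reporting in 10.14 format sends 48 << 14 for exactly 48.0. */
        unsigned int f = normalize_feedback(48 << 14, freqn, &shift);

        printf("normalized=0x%08x (%u samples/frame), shift=%d\n",
               f, f >> 16, shift);
        return 0;
    }

A 10.14-format device at 48 kHz yields shift = 2: the value had 14 fractional bits and needed two more to reach the Q16.16 the rest of the driver works in.
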
@@ -878,21 +853,6 @@ static struct snd_urb_ops audio_urb_ops[2] = {
        },
 };
 
-static struct snd_urb_ops audio_urb_ops_high_speed[2] = {
-       {
-               .prepare =      prepare_nodata_playback_urb,
-               .retire =       retire_playback_urb,
-               .prepare_sync = prepare_playback_sync_urb_hs,
-               .retire_sync =  retire_playback_sync_urb_hs,
-       },
-       {
-               .prepare =      prepare_capture_urb,
-               .retire =       retire_capture_urb,
-               .prepare_sync = prepare_capture_sync_urb_hs,
-               .retire_sync =  retire_capture_sync_urb,
-       },
-};
-
 /*
  * initialize the substream instance.
  */
@@ -909,23 +869,9 @@ void snd_usb_init_substream(struct snd_usb_stream *as,
        subs->direction = stream;
        subs->dev = as->chip->dev;
        subs->txfr_quirk = as->chip->txfr_quirk;
-       if (snd_usb_get_speed(subs->dev) == USB_SPEED_FULL) {
-               subs->ops = audio_urb_ops[stream];
-       } else {
-               subs->ops = audio_urb_ops_high_speed[stream];
-               switch (as->chip->usb_id) {
-               case USB_ID(0x041e, 0x3f02): /* E-Mu 0202 USB */
-               case USB_ID(0x041e, 0x3f04): /* E-Mu 0404 USB */
-               case USB_ID(0x041e, 0x3f0a): /* E-Mu Tracker Pre */
-                       subs->ops.retire_sync = retire_playback_sync_urb_hs_emu;
-                       break;
-               case USB_ID(0x0763, 0x2080): /* M-Audio Fast Track Ultra 8  */
-               case USB_ID(0x0763, 0x2081): /* M-Audio Fast Track Ultra 8R */
-                       subs->ops.prepare_sync = prepare_playback_sync_urb;
-                       subs->ops.retire_sync = retire_playback_sync_urb;
-                       break;
-               }
-       }
+       subs->ops = audio_urb_ops[stream];
+       if (snd_usb_get_speed(subs->dev) >= USB_SPEED_HIGH)
+               subs->ops.prepare_sync = prepare_capture_sync_urb_hs;
 
        snd_usb_set_pcm_ops(as->pcm, stream);
 
index 43e3dd2..399751b 100644 (file)
@@ -15,6 +15,23 @@ DESCRIPTION
 This command displays the symbolic event types which can be selected in the
 various perf commands with the -e option.
 
+EVENT MODIFIERS
+---------------
+
+Events can optionally have a modifier by appending a colon and one or
+more modifier characters.  Modifiers restrict counting to a privilege
+level: 'u' for user space, 'k' for kernel, 'h' for hypervisor.
+
+The 'p' modifier specifies how precise the sampled instruction address
+should be.  It is currently only implemented for Intel PEBS and can be
+specified multiple times:
+  0 - SAMPLE_IP can have arbitrary skid
+  1 - SAMPLE_IP must have constant skid
+  2 - SAMPLE_IP requested to have 0 skid
+  3 - SAMPLE_IP must have 0 skid
+
+The current PEBS implementation supports precision levels up to 2.
+
 RAW HARDWARE EVENT DESCRIPTOR
 -----------------------------
 Even when an event is not available in a symbolic form within perf right now,
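The EVENT MODIFIERS text added above is easiest to read alongside a concrete invocation. A minimal sketch, with the event choices and workload purely illustrative:

    # count user-space cycles only
    perf record -e cycles:u -- ./my_workload
    # 'pp' requests zero skid (precision level 2, the current PEBS maximum noted above)
    perf record -e cycles:pp -- ./my_workload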
index 27d52da..62de1b7 100644 (file)
@@ -16,7 +16,9 @@ or
 or
 'perf probe' --list
 or
-'perf probe' --line='FUNC[:RLN[+NUM|:RLN2]]|SRC:ALN[+NUM|:ALN2]'
+'perf probe' [options] --line='FUNC[:RLN[+NUM|:RLN2]]|SRC:ALN[+NUM|:ALN2]'
+or
+'perf probe' [options] --vars='PROBEPOINT'
 
 DESCRIPTION
 -----------
@@ -31,6 +33,11 @@ OPTIONS
 --vmlinux=PATH::
        Specify vmlinux path which has debuginfo (Dwarf binary).
 
+-m::
+--module=MODNAME::
+       Specify the module name in which perf-probe searches for probe
+       points or lines.
+
 -s::
 --source=PATH::
        Specify path to kernel source.
@@ -57,6 +64,15 @@ OPTIONS
        Show source code lines which can be probed. This needs an argument
        which specifies a range of the source code. (see LINE SYNTAX for detail)
 
+-V::
+--vars=::
+       Show the local variables available at the given probe point. The
+       argument syntax is the same as PROBE SYNTAX, but without ARGs.
+
+--externs::
+       (Only for --vars) Show externally defined variables in addition to
+       local variables.
+
 -f::
 --force::
        Forcibly add events with existing name.
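The --module and --vars options documented above compose with the existing PROBE SYNTAX; the function and module names below are placeholders, and both commands only inspect debuginfo without inserting a probe:

    # list local variables (plus externally defined ones) visible at a probe point
    perf probe -V do_sys_open --externs
    # restrict the debuginfo search to a module
    perf probe -m kvm -V kvm_arch_vcpu_ioctl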
index 3ee27dc..a91f9f9 100644 (file)
@@ -83,6 +83,10 @@ OPTIONS
 --call-graph::
        Do call-graph (stack chain/backtrace) recording.
 
+-q::
+--quiet::
+       Don't print any messages; useful for scripting.
+
 -v::
 --verbose::
        Be more verbose (show counter open errors, etc).
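A short sketch of where --quiet helps: when perf record runs inside a wrapper script or a pipeline, -q keeps its status lines out of the script's output. The events and workload below are placeholders:

    perf record -q -a -e syscalls:sys_enter_futex -e syscalls:sys_exit_futex -- sleep 10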
index 199d5e1..2e000c0 100644 (file)
@@ -50,14 +50,17 @@ static struct {
        bool list_events;
        bool force_add;
        bool show_lines;
+       bool show_vars;
+       bool show_ext_vars;
+       bool mod_events;
        int nevents;
        struct perf_probe_event events[MAX_PROBES];
        struct strlist *dellist;
        struct line_range line_range;
+       const char *target_module;
        int max_probe_points;
 } params;
 
-
 /* Parse an event definition. Note that any error must die. */
 static int parse_probe_event(const char *str)
 {
@@ -92,6 +95,7 @@ static int parse_probe_event_argv(int argc, const char **argv)
        len = 0;
        for (i = 0; i < argc; i++)
                len += sprintf(&buf[len], "%s ", argv[i]);
+       params.mod_events = true;
        ret = parse_probe_event(buf);
        free(buf);
        return ret;
@@ -100,9 +104,10 @@ static int parse_probe_event_argv(int argc, const char **argv)
 static int opt_add_probe_event(const struct option *opt __used,
                              const char *str, int unset __used)
 {
-       if (str)
+       if (str) {
+               params.mod_events = true;
                return parse_probe_event(str);
-       else
+       } else
                return 0;
 }
 
@@ -110,6 +115,7 @@ static int opt_del_probe_event(const struct option *opt __used,
                               const char *str, int unset __used)
 {
        if (str) {
+               params.mod_events = true;
                if (!params.dellist)
                        params.dellist = strlist__new(true, NULL);
                strlist__add(params.dellist, str);
@@ -130,6 +136,25 @@ static int opt_show_lines(const struct option *opt __used,
 
        return ret;
 }
+
+static int opt_show_vars(const struct option *opt __used,
+                        const char *str, int unset __used)
+{
+       struct perf_probe_event *pev = &params.events[params.nevents];
+       int ret;
+
+       if (!str)
+               return 0;
+
+       ret = parse_probe_event(str);
+       if (!ret && pev->nargs != 0) {
+               pr_err("  Error: '--vars' doesn't accept arguments.\n");
+               return -EINVAL;
+       }
+       params.show_vars = true;
+
+       return ret;
+}
 #endif
 
 static const char * const probe_usage[] = {
@@ -138,7 +163,8 @@ static const char * const probe_usage[] = {
        "perf probe [<options>] --del '[GROUP:]EVENT' ...",
        "perf probe --list",
 #ifdef DWARF_SUPPORT
-       "perf probe --line 'LINEDESC'",
+       "perf probe [<options>] --line 'LINEDESC'",
+       "perf probe [<options>] --vars 'PROBEPOINT'",
 #endif
        NULL
 };
@@ -180,10 +206,17 @@ static const struct option options[] = {
        OPT_CALLBACK('L', "line", NULL,
                     "FUNC[:RLN[+NUM|-RLN2]]|SRC:ALN[+NUM|-ALN2]",
                     "Show source code lines.", opt_show_lines),
+       OPT_CALLBACK('V', "vars", NULL,
+                    "FUNC[@SRC][+OFF|%return|:RL|;PT]|SRC:AL|SRC;PT",
+                    "Show accessible variables on PROBEDEF", opt_show_vars),
+       OPT_BOOLEAN('\0', "externs", &params.show_ext_vars,
+                   "Show external variables too (with --vars only)"),
        OPT_STRING('k', "vmlinux", &symbol_conf.vmlinux_name,
                   "file", "vmlinux pathname"),
        OPT_STRING('s', "source", &symbol_conf.source_prefix,
                   "directory", "path to kernel source"),
+       OPT_STRING('m', "module", &params.target_module,
+                  "modname", "target module name"),
 #endif
        OPT__DRY_RUN(&probe_event_dry_run),
        OPT_INTEGER('\0', "max-probes", &params.max_probe_points,
@@ -217,7 +250,7 @@ int cmd_probe(int argc, const char **argv, const char *prefix __used)
                usage_with_options(probe_usage, options);
 
        if (params.list_events) {
-               if (params.nevents != 0 || params.dellist) {
+               if (params.mod_events) {
                        pr_err("  Error: Don't use --list with --add/--del.\n");
                        usage_with_options(probe_usage, options);
                }
@@ -225,6 +258,10 @@ int cmd_probe(int argc, const char **argv, const char *prefix __used)
                        pr_err("  Error: Don't use --list with --line.\n");
                        usage_with_options(probe_usage, options);
                }
+               if (params.show_vars) {
+                       pr_err(" Error: Don't use --list with --vars.\n");
+                       usage_with_options(probe_usage, options);
+               }
                ret = show_perf_probe_events();
                if (ret < 0)
                        pr_err("  Error: Failed to show event list. (%d)\n",
@@ -234,17 +271,35 @@ int cmd_probe(int argc, const char **argv, const char *prefix __used)
 
 #ifdef DWARF_SUPPORT
        if (params.show_lines) {
-               if (params.nevents != 0 || params.dellist) {
-                       pr_warning("  Error: Don't use --line with"
-                                  " --add/--del.\n");
+               if (params.mod_events) {
+                       pr_err("  Error: Don't use --line with"
+                              " --add/--del.\n");
+                       usage_with_options(probe_usage, options);
+               }
+               if (params.show_vars) {
+                       pr_err(" Error: Don't use --line with --vars.\n");
                        usage_with_options(probe_usage, options);
                }
 
-               ret = show_line_range(&params.line_range);
+               ret = show_line_range(&params.line_range, params.target_module);
                if (ret < 0)
                        pr_err("  Error: Failed to show lines. (%d)\n", ret);
                return ret;
        }
+       if (params.show_vars) {
+               if (params.mod_events) {
+                       pr_err("  Error: Don't use --vars with"
+                              " --add/--del.\n");
+                       usage_with_options(probe_usage, options);
+               }
+               ret = show_available_vars(params.events, params.nevents,
+                                         params.max_probe_points,
+                                         params.target_module,
+                                         params.show_ext_vars);
+               if (ret < 0)
+                       pr_err("  Error: Failed to show vars. (%d)\n", ret);
+               return ret;
+       }
 #endif
 
        if (params.dellist) {
@@ -258,8 +313,9 @@ int cmd_probe(int argc, const char **argv, const char *prefix __used)
 
        if (params.nevents) {
                ret = add_perf_probe_events(params.events, params.nevents,
-                                           params.force_add,
-                                           params.max_probe_points);
+                                           params.max_probe_points,
+                                           params.target_module,
+                                           params.force_add);
                if (ret < 0) {
                        pr_err("  Error: Failed to add events. (%d)\n", ret);
                        return ret;
index ff77b80..4e75583 100644 (file)
@@ -353,7 +353,7 @@ try_again:
                }
 
                if (read(fd[nr_cpu][counter][thread_index], &read_data, sizeof(read_data)) == -1) {
-                       perror("Unable to read perf file descriptor\n");
+                       perror("Unable to read perf file descriptor");
                        exit(-1);
                }
 
@@ -626,7 +626,7 @@ static int __cmd_record(int argc, const char **argv)
 
        nr_cpus = read_cpu_map(cpu_list);
        if (nr_cpus < 1) {
-               perror("failed to collect number of CPUs\n");
+               perror("failed to collect number of CPUs");
                return -1;
        }
 
@@ -761,6 +761,9 @@ static int __cmd_record(int argc, const char **argv)
                }
        }
 
+       if (quiet)
+               return 0;
+
        fprintf(stderr, "[ perf record: Woken up %ld times to write data ]\n", waking);
 
        /*
@@ -820,6 +823,7 @@ static const struct option options[] = {
                    "do call-graph (stack chain/backtrace) recording"),
        OPT_INCR('v', "verbose", &verbose,
                    "be more verbose (show counter open errors, etc)"),
+       OPT_BOOLEAN('q', "quiet", &quiet, "don't print any message"),
        OPT_BOOLEAN('s', "stat", &inherit_stat,
                    "per thread counts"),
        OPT_BOOLEAN('d', "data", &sample_address,
index 40a6a29..2f8df45 100644 (file)
@@ -46,9 +46,6 @@ static struct scripting_ops   *scripting_ops;
 
 static void setup_scripting(void)
 {
-       /* make sure PERF_EXEC_PATH is set for scripts */
-       perf_set_argv_exec_path(perf_exec_path());
-
        setup_perl_scripting();
        setup_python_scripting();
 
@@ -285,7 +282,7 @@ static int parse_scriptname(const struct option *opt __used,
                script++;
        } else {
                script = str;
-               ext = strchr(script, '.');
+               ext = strrchr(script, '.');
                if (!ext) {
                        fprintf(stderr, "invalid script extension");
                        return -1;
@@ -593,6 +590,9 @@ int cmd_trace(int argc, const char **argv, const char *prefix __used)
                suffix = REPORT_SUFFIX;
        }
 
+       /* make sure PERF_EXEC_PATH is set for scripts */
+       perf_set_argv_exec_path(perf_exec_path());
+
        if (!suffix && argc >= 2 && strncmp(argv[1], "-", strlen("-")) != 0) {
                char *record_script_path, *report_script_path;
                int live_pipe[2];
@@ -625,12 +625,13 @@ int cmd_trace(int argc, const char **argv, const char *prefix __used)
                        dup2(live_pipe[1], 1);
                        close(live_pipe[0]);
 
-                       __argv = malloc(5 * sizeof(const char *));
+                       __argv = malloc(6 * sizeof(const char *));
                        __argv[0] = "/bin/sh";
                        __argv[1] = record_script_path;
-                       __argv[2] = "-o";
-                       __argv[3] = "-";
-                       __argv[4] = NULL;
+                       __argv[2] = "-q";
+                       __argv[3] = "-o";
+                       __argv[4] = "-";
+                       __argv[5] = NULL;
 
                        execvp("/bin/sh", (char **)__argv);
                        exit(-1);
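For context, the live mode wired up above is what consumes the new 'perf record -q' flag: 'perf trace SCRIPT' forks a record child that execs "/bin/sh SCRIPT-record -q -o -" and pipes its stdout into the report side. A hypothetical invocation (any script with matching *-record and *-report wrappers should do):

    perf trace syscall-counts    # record side runs quietly and streams its data to the reporter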
index e3a5e55..4028d92 100644 (file)
@@ -7,4 +7,4 @@ if [ $# -gt 0 ] ; then
        shift
     fi
 fi
-perf trace $@ -s ~/libexec/perf-core/scripts/perl/failed-syscalls.pl $comm
+perf trace $@ -s "$PERF_EXEC_PATH"/scripts/perl/failed-syscalls.pl $comm
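The same substitution is applied to every wrapper script that follows: the hard-coded ~/libexec path becomes "$PERF_EXEC_PATH", which perf puts in the environment before exec'ing the wrappers (that is what the perf_set_argv_exec_path() move in builtin-trace.c above guarantees). To confirm the value from inside any of these wrappers (the path shown is only illustrative):

    echo "$PERF_EXEC_PATH"    # e.g. /usr/libexec/perf-core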
index d83070b..ba25f4d 100644 (file)
@@ -7,7 +7,7 @@ if [ $# -lt 1 ] ; then
 fi
 comm=$1
 shift
-perf trace $@ -s ~/libexec/perf-core/scripts/perl/rw-by-file.pl $comm
+perf trace $@ -s "$PERF_EXEC_PATH"/scripts/perl/rw-by-file.pl $comm
 
 
 
index 7ef4698..641a3f5 100644 (file)
@@ -1,6 +1,6 @@
 #!/bin/bash
 # description: system-wide r/w activity
-perf trace $@ -s ~/libexec/perf-core/scripts/perl/rw-by-pid.pl
+perf trace $@ -s "$PERF_EXEC_PATH"/scripts/perl/rw-by-pid.pl
 
 
 
index 93e698c..4918dba 100644 (file)
@@ -17,7 +17,7 @@ if [ "$n_args" -gt 0 ] ; then
     interval=$1
     shift
 fi
-perf trace $@ -s ~/libexec/perf-core/scripts/perl/rwtop.pl $interval
+perf trace $@ -s "$PERF_EXEC_PATH"/scripts/perl/rwtop.pl $interval
 
 
 
index a0d898f..49052eb 100644 (file)
@@ -1,6 +1,6 @@
 #!/bin/bash
 # description: system-wide min/max/avg wakeup latency
-perf trace $@ -s ~/libexec/perf-core/scripts/perl/wakeup-latency.pl
+perf trace $@ -s "$PERF_EXEC_PATH"/scripts/perl/wakeup-latency.pl
 
 
 
index 3508113..df0c65f 100644 (file)
@@ -1,6 +1,6 @@
 #!/bin/bash
 # description: workqueue stats (ins/exe/create/destroy)
-perf trace $@ -s ~/libexec/perf-core/scripts/perl/workqueue-stats.pl
+perf trace $@ -s "$PERF_EXEC_PATH"/scripts/perl/workqueue-stats.pl
 
 
 
index 9689bc0..13cc02b 100644 (file)
@@ -6,6 +6,14 @@
 # Public License ("GPL") version 2 as published by the Free Software
 # Foundation.
 
+import errno, os
+
+FUTEX_WAIT = 0
+FUTEX_WAKE = 1
+FUTEX_PRIVATE_FLAG = 128
+FUTEX_CLOCK_REALTIME = 256
+FUTEX_CMD_MASK = ~(FUTEX_PRIVATE_FLAG | FUTEX_CLOCK_REALTIME)
+
 NSECS_PER_SEC    = 1000000000
 
 def avg(total, n):
@@ -24,5 +32,55 @@ def nsecs_str(nsecs):
     str = "%5u.%09u" % (nsecs_secs(nsecs), nsecs_nsecs(nsecs)),
     return str
 
+def add_stats(dict, key, value):
+       if not dict.has_key(key):
+               dict[key] = (value, value, value, 1)
+       else:
+               min, max, avg, count = dict[key]
+               if value < min:
+                       min = value
+               if value > max:
+                       max = value
+               avg = (avg + value) / 2
+               dict[key] = (min, max, avg, count + 1)
+
 def clear_term():
     print("\x1b[H\x1b[2J")
+
+audit_package_warned = False
+
+try:
+       import audit
+       machine_to_id = {
+               'x86_64': audit.MACH_86_64,
+               'alpha' : audit.MACH_ALPHA,
+               'ia64'  : audit.MACH_IA64,
+               'ppc'   : audit.MACH_PPC,
+               'ppc64' : audit.MACH_PPC64,
+               's390'  : audit.MACH_S390,
+               's390x' : audit.MACH_S390X,
+               'i386'  : audit.MACH_X86,
+               'i586'  : audit.MACH_X86,
+               'i686'  : audit.MACH_X86,
+       }
+       try:
+               machine_to_id['armeb'] = audit.MACH_ARMEB
+       except:
+               pass
+       machine_id = machine_to_id[os.uname()[4]]
+except:
+       if not audit_package_warned:
+               audit_package_warned = True
+               print "Install the audit-libs-python package to get syscall names"
+
+def syscall_name(id):
+       try:
+               return audit.audit_syscall_to_name(id, machine_id)
+       except:
+               return str(id)
+
+def strerror(nr):
+       try:
+               return errno.errorcode[abs(nr)]
+       except:
+               return "Unknown %d errno" % nr
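The Util.py helpers added above do three things: add_stats() keeps a cheap per-key tuple of (min, max, running average, count), where the average folds each new value into the previous average rather than computing an exact mean; syscall_name() resolves raw syscall numbers through the audit python bindings when they are installed; and strerror() maps a negative return value to its errno name. A hypothetical session (the package name comes from the warning string above and may differ per distribution):

    yum install audit-libs-python
    perf trace record syscall-counts -- sleep 5
    perf trace report syscall-counts    # now prints syscall names instead of raw ids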
index 3029354..0358702 100644 (file)
@@ -7,4 +7,4 @@ if [ $# -gt 0 ] ; then
        shift
     fi
 fi
-perf trace $@ -s ~/libexec/perf-core/scripts/python/failed-syscalls-by-pid.py $comm
+perf trace $@ -s "$PERF_EXEC_PATH"/scripts/python/failed-syscalls-by-pid.py $comm
diff --git a/tools/perf/scripts/python/bin/futex-contention-record b/tools/perf/scripts/python/bin/futex-contention-record
new file mode 100644 (file)
index 0000000..5ecbb43
--- /dev/null
@@ -0,0 +1,2 @@
+#!/bin/bash
+perf record -a -e syscalls:sys_enter_futex -e syscalls:sys_exit_futex $@
diff --git a/tools/perf/scripts/python/bin/futex-contention-report b/tools/perf/scripts/python/bin/futex-contention-report
new file mode 100644 (file)
index 0000000..c826813
--- /dev/null
@@ -0,0 +1,4 @@
+#!/bin/bash
+# description: futex contention measurement
+
+perf trace $@ -s "$PERF_EXEC_PATH"/scripts/python/futex-contention.py
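The two new wrapper scripts above hook futex-contention.py (added further below) into perf's script front end. A hypothetical session, with the workload a placeholder:

    perf trace record futex-contention -- ./my_futex_heavy_app
    perf trace report futex-contention
    # or record and report at once through the live-mode pipe
    perf trace futex-contention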
index c3d0a63..4ad361b 100644 (file)
@@ -2,4 +2,4 @@
 # description: display a process of packet and processing time
 # args: [tx] [rx] [dev=] [debug]
 
-perf trace -s ~/libexec/perf-core/scripts/python/netdev-times.py $@
+perf trace -s "$PERF_EXEC_PATH"/scripts/python/netdev-times.py $@
index 61d05f7..df1791f 100644 (file)
@@ -1,3 +1,3 @@
 #!/bin/bash
 # description: sched migration overview
-perf trace $@ -s ~/libexec/perf-core/scripts/python/sched-migration.py
+perf trace $@ -s "$PERF_EXEC_PATH"/scripts/python/sched-migration.py
index b01c842..36b409c 100644 (file)
@@ -21,4 +21,4 @@ elif [ "$n_args" -gt 0 ] ; then
     interval=$1
     shift
 fi
-perf trace $@ -s ~/libexec/perf-core/scripts/python/sctop.py $comm $interval
+perf trace $@ -s "$PERF_EXEC_PATH"/scripts/python/sctop.py $comm $interval
index 9e9d8dd..4eb88c9 100644 (file)
@@ -7,4 +7,4 @@ if [ $# -gt 0 ] ; then
        shift
     fi
 fi
-perf trace $@ -s ~/libexec/perf-core/scripts/python/syscall-counts-by-pid.py $comm
+perf trace $@ -s "$PERF_EXEC_PATH"/scripts/python/syscall-counts-by-pid.py $comm
index dc076b6..cb2f9c5 100644 (file)
@@ -7,4 +7,4 @@ if [ $# -gt 0 ] ; then
        shift
     fi
 fi
-perf trace $@ -s ~/libexec/perf-core/scripts/python/syscall-counts.py $comm
+perf trace $@ -s "$PERF_EXEC_PATH"/scripts/python/syscall-counts.py $comm
index 0ca0227..acd7848 100644 (file)
@@ -13,21 +13,26 @@ sys.path.append(os.environ['PERF_EXEC_PATH'] + \
 
 from perf_trace_context import *
 from Core import *
+from Util import *
 
-usage = "perf trace -s syscall-counts-by-pid.py [comm]\n";
+usage = "perf trace -s syscall-counts-by-pid.py [comm|pid]\n";
 
 for_comm = None
+for_pid = None
 
 if len(sys.argv) > 2:
        sys.exit(usage)
 
 if len(sys.argv) > 1:
-       for_comm = sys.argv[1]
+       try:
+               for_pid = int(sys.argv[1])
+       except:
+               for_comm = sys.argv[1]
 
 syscalls = autodict()
 
 def trace_begin():
-       pass
+       print "Press control+C to stop and show the summary"
 
 def trace_end():
        print_error_totals()
@@ -35,9 +40,9 @@ def trace_end():
 def raw_syscalls__sys_exit(event_name, context, common_cpu,
        common_secs, common_nsecs, common_pid, common_comm,
        id, ret):
-       if for_comm is not None:
-               if common_comm != for_comm:
-                       return
+       if (for_comm and common_comm != for_comm) or \
+          (for_pid  and common_pid  != for_pid ):
+               return
 
        if ret < 0:
                try:
@@ -62,7 +67,7 @@ def print_error_totals():
                    print "\n%s [%d]\n" % (comm, pid),
                    id_keys = syscalls[comm][pid].keys()
                    for id in id_keys:
-                           print "  syscall: %-16d\n" % (id),
+                           print "  syscall: %-16s\n" % syscall_name(id),
                            ret_keys = syscalls[comm][pid][id].keys()
                            for ret, val in sorted(syscalls[comm][pid][id].iteritems(), key = lambda(k, v): (v, k),  reverse = True):
-                                   print "    err = %-20d  %10d\n" % (ret, val),
+                                   print "    err = %-20s  %10d\n" % (strerror(ret), val),
diff --git a/tools/perf/scripts/python/futex-contention.py b/tools/perf/scripts/python/futex-contention.py
new file mode 100644 (file)
index 0000000..11e70a3
--- /dev/null
@@ -0,0 +1,50 @@
+# futex contention
+# (c) 2010, Arnaldo Carvalho de Melo <acme@redhat.com>
+# Licensed under the terms of the GNU GPL License version 2
+#
+# Translation of:
+#
+# http://sourceware.org/systemtap/wiki/WSFutexContention
+#
+# to perf python scripting.
+#
+# Measures futex contention
+
+import os, sys
+sys.path.append(os.environ['PERF_EXEC_PATH'] + '/scripts/python/Perf-Trace-Util/lib/Perf/Trace')
+from Util import *
+
+process_names = {}
+thread_thislock = {}
+thread_blocktime = {}
+
+lock_waits = {} # long-lived stats on (tid,lock) blockage elapsed time
+process_names = {} # long-lived pid-to-execname mapping
+
+def syscalls__sys_enter_futex(event, ctxt, cpu, s, ns, tid, comm,
+                             nr, uaddr, op, val, utime, uaddr2, val3):
+       cmd = op & FUTEX_CMD_MASK
+       if cmd != FUTEX_WAIT:
+               return # we don't care about originators of WAKE events
+
+       process_names[tid] = comm
+       thread_thislock[tid] = uaddr
+       thread_blocktime[tid] = nsecs(s, ns)
+
+def syscalls__sys_exit_futex(event, ctxt, cpu, s, ns, tid, comm,
+                            nr, ret):
+       if thread_blocktime.has_key(tid):
+               elapsed = nsecs(s, ns) - thread_blocktime[tid]
+               add_stats(lock_waits, (tid, thread_thislock[tid]), elapsed)
+               del thread_blocktime[tid]
+               del thread_thislock[tid]
+
+def trace_begin():
+       print "Press control+C to stop and show the summary"
+
+def trace_end():
+       for (tid, lock) in lock_waits:
+               min, max, avg, count = lock_waits[tid, lock]
+               print "%s[%d] lock %x contended %d times, %d avg ns" % \
+                     (process_names[tid], tid, lock, count, avg)
+
index 6cafad4..7a6ec2c 100644 (file)
@@ -8,10 +8,7 @@
 # will be refreshed every [interval] seconds.  The default interval is
 # 3 seconds.
 
-import thread
-import time
-import os
-import sys
+import os, sys, thread, time
 
 sys.path.append(os.environ['PERF_EXEC_PATH'] + \
        '/scripts/python/Perf-Trace-Util/lib/Perf/Trace')
@@ -20,7 +17,7 @@ from perf_trace_context import *
 from Core import *
 from Util import *
 
-usage = "perf trace -s syscall-counts.py [comm] [interval]\n";
+usage = "perf trace -s sctop.py [comm] [interval]\n";
 
 for_comm = None
 default_interval = 3
@@ -71,7 +68,7 @@ def print_syscall_totals(interval):
                for id, val in sorted(syscalls.iteritems(), key = lambda(k, v): (v, k), \
                                              reverse = True):
                        try:
-                               print "%-40d  %10d\n" % (id, val),
+                               print "%-40s  %10d\n" % (syscall_name(id), val),
                        except TypeError:
                                pass
                syscalls.clear()
index af722d6..d1ee3ec 100644 (file)
@@ -5,29 +5,33 @@
 # Displays system-wide system call totals, broken down by syscall.
 # If a [comm] arg is specified, only syscalls called by [comm] are displayed.
 
-import os
-import sys
+import os, sys
 
 sys.path.append(os.environ['PERF_EXEC_PATH'] + \
        '/scripts/python/Perf-Trace-Util/lib/Perf/Trace')
 
 from perf_trace_context import *
 from Core import *
+from Util import syscall_name
 
 usage = "perf trace -s syscall-counts-by-pid.py [comm]\n";
 
 for_comm = None
+for_pid = None
 
 if len(sys.argv) > 2:
        sys.exit(usage)
 
 if len(sys.argv) > 1:
-       for_comm = sys.argv[1]
+       try:
+               for_pid = int(sys.argv[1])
+       except:
+               for_comm = sys.argv[1]
 
 syscalls = autodict()
 
 def trace_begin():
-       pass
+       print "Press control+C to stop and show the summary"
 
 def trace_end():
        print_syscall_totals()
@@ -35,9 +39,10 @@ def trace_end():
 def raw_syscalls__sys_enter(event_name, context, common_cpu,
        common_secs, common_nsecs, common_pid, common_comm,
        id, args):
-       if for_comm is not None:
-               if common_comm != for_comm:
-                       return
+
+       if (for_comm and common_comm != for_comm) or \
+          (for_pid  and common_pid  != for_pid ):
+               return
        try:
                syscalls[common_comm][common_pid][id] += 1
        except TypeError:
@@ -61,4 +66,4 @@ def print_syscall_totals():
                    id_keys = syscalls[comm][pid].keys()
                    for id, val in sorted(syscalls[comm][pid].iteritems(), \
                                  key = lambda(k, v): (v, k),  reverse = True):
-                           print "  %-38d  %10d\n" % (id, val),
+                           print "  %-38s  %10d\n" % (syscall_name(id), val),
index f977e85..ea183dc 100644 (file)
@@ -13,6 +13,7 @@ sys.path.append(os.environ['PERF_EXEC_PATH'] + \
 
 from perf_trace_context import *
 from Core import *
+from Util import syscall_name
 
 usage = "perf trace -s syscall-counts.py [comm]\n";
 
@@ -27,7 +28,7 @@ if len(sys.argv) > 1:
 syscalls = autodict()
 
 def trace_begin():
-       pass
+       print "Press control+C to stop and show the summary"
 
 def trace_end():
        print_syscall_totals()
@@ -55,4 +56,4 @@ def print_syscall_totals():
 
     for id, val in sorted(syscalls.iteritems(), key = lambda(k, v): (v, k), \
                                  reverse = True):
-           print "%-40d  %10d\n" % (id, val),
+           print "%-40s  %10d\n" % (syscall_name(id), val),
index f9c7e3a..c8d81b0 100644 (file)
@@ -12,8 +12,8 @@
 #include "debug.h"
 #include "util.h"
 
-int verbose = 0;
-bool dump_trace = false;
+int verbose;
+bool dump_trace = false, quiet = false;
 
 int eprintf(int level, const char *fmt, ...)
 {
index 7a17ee0..7b51408 100644 (file)
@@ -6,7 +6,7 @@
 #include "event.h"
 
 extern int verbose;
-extern bool dump_trace;
+extern bool quiet, dump_trace;
 
 int dump_printf(const char *fmt, ...) __attribute__((format(printf, 1, 2)));
 void trace_event(event_t *event);
index 7857579..b397c03 100644 (file)
@@ -215,6 +215,16 @@ struct symbol *map_groups__find_function_by_name(struct map_groups *self,
        return map_groups__find_symbol_by_name(self, MAP__FUNCTION, name, mapp, filter);
 }
 
+static inline
+struct symbol *machine__find_kernel_function_by_name(struct machine *self,
+                                                    const char *name,
+                                                    struct map **mapp,
+                                                    symbol_filter_t filter)
+{
+       return map_groups__find_function_by_name(&self->kmaps, name, mapp,
+                                                filter);
+}
+
 int map_groups__fixup_overlappings(struct map_groups *self, struct map *map,
                                   int verbose, FILE *fp);
 
index fcc16e4..3b6a529 100644 (file)
@@ -74,10 +74,9 @@ static int e_snprintf(char *str, size_t size, const char *format, ...)
 static char *synthesize_perf_probe_point(struct perf_probe_point *pp);
 static struct machine machine;
 
-/* Initialize symbol maps and path of vmlinux */
+/* Initialize symbol maps and path of vmlinux/modules */
 static int init_vmlinux(void)
 {
-       struct dso *kernel;
        int ret;
 
        symbol_conf.sort_by_name = true;
@@ -91,33 +90,61 @@ static int init_vmlinux(void)
                goto out;
        }
 
-       ret = machine__init(&machine, "/", 0);
+       ret = machine__init(&machine, "", HOST_KERNEL_ID);
        if (ret < 0)
                goto out;
 
-       kernel = dso__new_kernel(symbol_conf.vmlinux_name);
-       if (kernel == NULL)
-               die("Failed to create kernel dso.");
-
-       ret = __machine__create_kernel_maps(&machine, kernel);
-       if (ret < 0)
-               pr_debug("Failed to create kernel maps.\n");
-
+       if (machine__create_kernel_maps(&machine) < 0) {
+               pr_debug("machine__create_kernel_maps ");
+               goto out;
+       }
 out:
        if (ret < 0)
                pr_warning("Failed to init vmlinux path.\n");
        return ret;
 }
 
+static struct symbol *__find_kernel_function_by_name(const char *name,
+                                                    struct map **mapp)
+{
+       return machine__find_kernel_function_by_name(&machine, name, mapp,
+                                                    NULL);
+}
+
+const char *kernel_get_module_path(const char *module)
+{
+       struct dso *dso;
+
+       if (module) {
+               list_for_each_entry(dso, &machine.kernel_dsos, node) {
+                       if (strncmp(dso->short_name + 1, module,
+                                   dso->short_name_len - 2) == 0)
+                               goto found;
+               }
+               pr_debug("Failed to find module %s.\n", module);
+               return NULL;
+       } else {
+               dso = machine.vmlinux_maps[MAP__FUNCTION]->dso;
+               if (dso__load_vmlinux_path(dso,
+                        machine.vmlinux_maps[MAP__FUNCTION], NULL) < 0) {
+                       pr_debug("Failed to load kernel map.\n");
+                       return NULL;
+               }
+       }
+found:
+       return dso->long_name;
+}
+
 #ifdef DWARF_SUPPORT
-static int open_vmlinux(void)
+static int open_vmlinux(const char *module)
 {
-       if (map__load(machine.vmlinux_maps[MAP__FUNCTION], NULL) < 0) {
-               pr_debug("Failed to load kernel map.\n");
-               return -EINVAL;
+       const char *path = kernel_get_module_path(module);
+       if (!path) {
+               pr_err("Failed to find path of %s module", module ?: "kernel");
+               return -ENOENT;
        }
-       pr_debug("Try to open %s\n", machine.vmlinux_maps[MAP__FUNCTION]->dso->long_name);
-       return open(machine.vmlinux_maps[MAP__FUNCTION]->dso->long_name, O_RDONLY);
+       pr_debug("Try to open %s\n", path);
+       return open(path, O_RDONLY);
 }
 
 /*
@@ -125,20 +152,19 @@ static int open_vmlinux(void)
  * Currently only handles kprobes.
  */
 static int kprobe_convert_to_perf_probe(struct probe_trace_point *tp,
-                                      struct perf_probe_point *pp)
+                                       struct perf_probe_point *pp)
 {
        struct symbol *sym;
-       int fd, ret = -ENOENT;
+       struct map *map;
+       u64 addr;
+       int ret = -ENOENT;
 
-       sym = map__find_symbol_by_name(machine.vmlinux_maps[MAP__FUNCTION],
-                                      tp->symbol, NULL);
+       sym = __find_kernel_function_by_name(tp->symbol, &map);
        if (sym) {
-               fd = open_vmlinux();
-               if (fd >= 0) {
-                       ret = find_perf_probe_point(fd,
-                                                sym->start + tp->offset, pp);
-                       close(fd);
-               }
+               addr = map->unmap_ip(map, sym->start + tp->offset);
+               pr_debug("try to find %s+%ld@%llx\n", tp->symbol,
+                        tp->offset, addr);
+               ret = find_perf_probe_point((unsigned long)addr, pp);
        }
        if (ret <= 0) {
                pr_debug("Failed to find corresponding probes from "
@@ -156,12 +182,12 @@ static int kprobe_convert_to_perf_probe(struct probe_trace_point *tp,
 /* Try to find perf_probe_event with debuginfo */
 static int try_to_find_probe_trace_events(struct perf_probe_event *pev,
                                           struct probe_trace_event **tevs,
-                                          int max_tevs)
+                                          int max_tevs, const char *module)
 {
        bool need_dwarf = perf_probe_event_need_dwarf(pev);
        int fd, ntevs;
 
-       fd = open_vmlinux();
+       fd = open_vmlinux(module);
        if (fd < 0) {
                if (need_dwarf) {
                        pr_warning("Failed to open debuginfo file.\n");
@@ -300,7 +326,7 @@ error:
  * Show line-range always requires debuginfo to find source file and
  * line number.
  */
-int show_line_range(struct line_range *lr)
+int show_line_range(struct line_range *lr, const char *module)
 {
        int l = 1;
        struct line_node *ln;
@@ -313,7 +339,7 @@ int show_line_range(struct line_range *lr)
        if (ret < 0)
                return ret;
 
-       fd = open_vmlinux();
+       fd = open_vmlinux(module);
        if (fd < 0) {
                pr_warning("Failed to open debuginfo file.\n");
                return fd;
@@ -378,11 +404,84 @@ end:
        return ret;
 }
 
+static int show_available_vars_at(int fd, struct perf_probe_event *pev,
+                                 int max_vls, bool externs)
+{
+       char *buf;
+       int ret, i;
+       struct str_node *node;
+       struct variable_list *vls = NULL, *vl;
+
+       buf = synthesize_perf_probe_point(&pev->point);
+       if (!buf)
+               return -EINVAL;
+       pr_debug("Searching variables at %s\n", buf);
+
+       ret = find_available_vars_at(fd, pev, &vls, max_vls, externs);
+       if (ret > 0) {
+               /* Some variables were found */
+               fprintf(stdout, "Available variables at %s\n", buf);
+               for (i = 0; i < ret; i++) {
+                       vl = &vls[i];
+                       /*
+                        * A probe point might be converted to
+                        * several trace points.
+                        */
+                       fprintf(stdout, "\t@<%s+%lu>\n", vl->point.symbol,
+                               vl->point.offset);
+                       free(vl->point.symbol);
+                       if (vl->vars) {
+                               strlist__for_each(node, vl->vars)
+                                       fprintf(stdout, "\t\t%s\n", node->s);
+                               strlist__delete(vl->vars);
+                       } else
+                               fprintf(stdout, "(No variables)\n");
+               }
+               free(vls);
+       } else
+               pr_err("Failed to find variables at %s (%d)\n", buf, ret);
+
+       free(buf);
+       return ret;
+}
+
+/* Show available variables on given probe point */
+int show_available_vars(struct perf_probe_event *pevs, int npevs,
+                       int max_vls, const char *module, bool externs)
+{
+       int i, fd, ret = 0;
+
+       ret = init_vmlinux();
+       if (ret < 0)
+               return ret;
+
+       fd = open_vmlinux(module);
+       if (fd < 0) {
+               pr_warning("Failed to open debuginfo file.\n");
+               return fd;
+       }
+
+       setup_pager();
+
+       for (i = 0; i < npevs && ret >= 0; i++)
+               ret = show_available_vars_at(fd, &pevs[i], max_vls, externs);
+
+       close(fd);
+       return ret;
+}
+
 #else  /* !DWARF_SUPPORT */
 
 static int kprobe_convert_to_perf_probe(struct probe_trace_point *tp,
-                                      struct perf_probe_point *pp)
+                                       struct perf_probe_point *pp)
 {
+       struct symbol *sym;
+
+       sym = __find_kernel_function_by_name(tp->symbol, NULL);
+       if (!sym) {
+               pr_err("Failed to find symbol %s in kernel.\n", tp->symbol);
+               return -ENOENT;
+       }
        pp->function = strdup(tp->symbol);
        if (pp->function == NULL)
                return -ENOMEM;
@@ -394,7 +493,7 @@ static int kprobe_convert_to_perf_probe(struct probe_trace_point *tp,
 
 static int try_to_find_probe_trace_events(struct perf_probe_event *pev,
                                struct probe_trace_event **tevs __unused,
-                               int max_tevs __unused)
+                               int max_tevs __unused, const char *mod __unused)
 {
        if (perf_probe_event_need_dwarf(pev)) {
                pr_warning("Debuginfo-analysis is not supported.\n");
@@ -403,12 +502,19 @@ static int try_to_find_probe_trace_events(struct perf_probe_event *pev,
        return 0;
 }
 
-int show_line_range(struct line_range *lr __unused)
+int show_line_range(struct line_range *lr __unused, const char *module __unused)
 {
        pr_warning("Debuginfo-analysis is not supported.\n");
        return -ENOSYS;
 }
 
+int show_available_vars(struct perf_probe_event *pevs __unused,
+                       int npevs __unused, int max_vls __unused,
+                       const char *module __unused, bool externs __unused)
+{
+       pr_warning("Debuginfo-analysis is not supported.\n");
+       return -ENOSYS;
+}
 #endif
 
 int parse_line_range_desc(const char *arg, struct line_range *lr)
@@ -1087,7 +1193,7 @@ error:
 }
 
 static int convert_to_perf_probe_event(struct probe_trace_event *tev,
-                               struct perf_probe_event *pev)
+                                      struct perf_probe_event *pev)
 {
        char buf[64] = "";
        int i, ret;
@@ -1516,14 +1622,14 @@ static int __add_probe_trace_events(struct perf_probe_event *pev,
 
 static int convert_to_probe_trace_events(struct perf_probe_event *pev,
                                          struct probe_trace_event **tevs,
-                                         int max_tevs)
+                                         int max_tevs, const char *module)
 {
        struct symbol *sym;
        int ret = 0, i;
        struct probe_trace_event *tev;
 
        /* Convert perf_probe_event with debuginfo */
-       ret = try_to_find_probe_trace_events(pev, tevs, max_tevs);
+       ret = try_to_find_probe_trace_events(pev, tevs, max_tevs, module);
        if (ret != 0)
                return ret;
 
@@ -1572,8 +1678,7 @@ static int convert_to_probe_trace_events(struct perf_probe_event *pev,
        }
 
        /* Currently just checking function name from symbol map */
-       sym = map__find_symbol_by_name(machine.vmlinux_maps[MAP__FUNCTION],
-                                      tev->point.symbol, NULL);
+       sym = __find_kernel_function_by_name(tev->point.symbol, NULL);
        if (!sym) {
                pr_warning("Kernel symbol \'%s\' not found.\n",
                           tev->point.symbol);
@@ -1596,7 +1701,7 @@ struct __event_package {
 };
 
 int add_perf_probe_events(struct perf_probe_event *pevs, int npevs,
-                         bool force_add, int max_tevs)
+                         int max_tevs, const char *module, bool force_add)
 {
        int i, j, ret;
        struct __event_package *pkgs;
@@ -1617,7 +1722,9 @@ int add_perf_probe_events(struct perf_probe_event *pevs, int npevs,
                pkgs[i].pev = &pevs[i];
                /* Convert with or without debuginfo */
                ret  = convert_to_probe_trace_events(pkgs[i].pev,
-                                                     &pkgs[i].tevs, max_tevs);
+                                                    &pkgs[i].tevs,
+                                                    max_tevs,
+                                                    module);
                if (ret < 0)
                        goto end;
                pkgs[i].ntevs = ret;
index 5af3924..5accbed 100644 (file)
@@ -90,6 +90,12 @@ struct line_range {
        struct list_head        line_list;      /* Visible lines */
 };
 
+/* List of variables */
+struct variable_list {
+       struct probe_trace_point        point;  /* Actual probepoint */
+       struct strlist                  *vars;  /* Available variables */
+};
+
 /* Command string to events */
 extern int parse_perf_probe_command(const char *cmd,
                                    struct perf_probe_event *pev);
@@ -109,12 +115,18 @@ extern void clear_perf_probe_event(struct perf_probe_event *pev);
 /* Command string to line-range */
 extern int parse_line_range_desc(const char *cmd, struct line_range *lr);
 
+/* Internal use: Return kernel/module path */
+extern const char *kernel_get_module_path(const char *module);
 
 extern int add_perf_probe_events(struct perf_probe_event *pevs, int npevs,
-                                bool force_add, int max_probe_points);
+                                int max_probe_points, const char *module,
+                                bool force_add);
 extern int del_perf_probe_events(struct strlist *dellist);
 extern int show_perf_probe_events(void);
-extern int show_line_range(struct line_range *lr);
+extern int show_line_range(struct line_range *lr, const char *module);
+extern int show_available_vars(struct perf_probe_event *pevs, int npevs,
+                              int max_probe_points, const char *module,
+                              bool externs);
 
 
 /* Maximum index number of event-name postfix */
index 32b81f7..3991d73 100644 (file)
@@ -116,6 +116,101 @@ static void line_list__free(struct list_head *head)
        }
 }
 
+/* Dwarf FL wrappers */
+
+static int __linux_kernel_find_elf(Dwfl_Module *mod,
+                                  void **userdata,
+                                  const char *module_name,
+                                  Dwarf_Addr base,
+                                  char **file_name, Elf **elfp)
+{
+       int fd;
+       const char *path = kernel_get_module_path(module_name);
+
+       if (path) {
+               fd = open(path, O_RDONLY);
+               if (fd >= 0) {
+                       *file_name = strdup(path);
+                       return fd;
+               }
+       }
+       /* If failed, try to call standard method */
+       return dwfl_linux_kernel_find_elf(mod, userdata, module_name, base,
+                                         file_name, elfp);
+}
+
+static char *debuginfo_path;   /* Currently dummy */
+
+static const Dwfl_Callbacks offline_callbacks = {
+       .find_debuginfo = dwfl_standard_find_debuginfo,
+       .debuginfo_path = &debuginfo_path,
+
+       .section_address = dwfl_offline_section_address,
+
+       /* We use this table for core files too.  */
+       .find_elf = dwfl_build_id_find_elf,
+};
+
+static const Dwfl_Callbacks kernel_callbacks = {
+       .find_debuginfo = dwfl_standard_find_debuginfo,
+       .debuginfo_path = &debuginfo_path,
+
+       .find_elf = __linux_kernel_find_elf,
+       .section_address = dwfl_linux_kernel_module_section_address,
+};
+
+/* Get a Dwarf from offline image */
+static Dwarf *dwfl_init_offline_dwarf(int fd, Dwfl **dwflp, Dwarf_Addr *bias)
+{
+       Dwfl_Module *mod;
+       Dwarf *dbg = NULL;
+
+       if (!dwflp)
+               return NULL;
+
+       *dwflp = dwfl_begin(&offline_callbacks);
+       if (!*dwflp)
+               return NULL;
+
+       mod = dwfl_report_offline(*dwflp, "", "", fd);
+       if (!mod)
+               goto error;
+
+       dbg = dwfl_module_getdwarf(mod, bias);
+       if (!dbg) {
+error:
+               dwfl_end(*dwflp);
+               *dwflp = NULL;
+       }
+       return dbg;
+}
+
+/* Get a Dwarf from live kernel image */
+static Dwarf *dwfl_init_live_kernel_dwarf(Dwarf_Addr addr, Dwfl **dwflp,
+                                         Dwarf_Addr *bias)
+{
+       Dwarf *dbg;
+
+       if (!dwflp)
+               return NULL;
+
+       *dwflp = dwfl_begin(&kernel_callbacks);
+       if (!*dwflp)
+               return NULL;
+
+       /* Load the kernel dwarves: we don't care about the result here */
+       dwfl_linux_kernel_report_kernel(*dwflp);
+       dwfl_linux_kernel_report_modules(*dwflp);
+
+       dbg = dwfl_addrdwarf(*dwflp, addr, bias);
+       /* Here, check whether we could get a real dwarf */
+       if (!dbg) {
+               dwfl_end(*dwflp);
+               *dwflp = NULL;
+       }
+       return dbg;
+}
+
 /* Dwarf wrappers */
 
 /* Find the realpath of the target file. */
@@ -160,26 +255,44 @@ static bool die_compare_name(Dwarf_Die *dw_die, const char *tname)
        return name ? (strcmp(tname, name) == 0) : false;
 }
 
-/* Get type die, but skip qualifiers and typedef */
-static Dwarf_Die *die_get_real_type(Dwarf_Die *vr_die, Dwarf_Die *die_mem)
+/* Get type die */
+static Dwarf_Die *die_get_type(Dwarf_Die *vr_die, Dwarf_Die *die_mem)
 {
        Dwarf_Attribute attr;
+
+       if (dwarf_attr_integrate(vr_die, DW_AT_type, &attr) &&
+           dwarf_formref_die(&attr, die_mem))
+               return die_mem;
+       else
+               return NULL;
+}
+
+/* Get a type die, but skip qualifiers */
+static Dwarf_Die *__die_get_real_type(Dwarf_Die *vr_die, Dwarf_Die *die_mem)
+{
        int tag;
 
        do {
-               if (dwarf_attr(vr_die, DW_AT_type, &attr) == NULL ||
-                   dwarf_formref_die(&attr, die_mem) == NULL)
-                       return NULL;
-
-               tag = dwarf_tag(die_mem);
-               vr_die = die_mem;
+               vr_die = die_get_type(vr_die, die_mem);
+               if (!vr_die)
+                       break;
+               tag = dwarf_tag(vr_die);
        } while (tag == DW_TAG_const_type ||
                 tag == DW_TAG_restrict_type ||
                 tag == DW_TAG_volatile_type ||
-                tag == DW_TAG_shared_type ||
-                tag == DW_TAG_typedef);
+                tag == DW_TAG_shared_type);
+
+       return vr_die;
+}
 
-       return die_mem;
+/* Get a type die, but skip qualifiers and typedef */
+static Dwarf_Die *die_get_real_type(Dwarf_Die *vr_die, Dwarf_Die *die_mem)
+{
+       do {
+               vr_die = __die_get_real_type(vr_die, die_mem);
+       } while (vr_die && dwarf_tag(vr_die) == DW_TAG_typedef);
+
+       return vr_die;
 }
 
 static bool die_is_signed_type(Dwarf_Die *tp_die)
@@ -320,25 +433,35 @@ static Dwarf_Die *die_find_inlinefunc(Dwarf_Die *sp_die, Dwarf_Addr addr,
        return die_find_child(sp_die, __die_find_inline_cb, &addr, die_mem);
 }
 
+struct __find_variable_param {
+       const char *name;
+       Dwarf_Addr addr;
+};
+
 static int __die_find_variable_cb(Dwarf_Die *die_mem, void *data)
 {
-       const char *name = data;
+       struct __find_variable_param *fvp = data;
        int tag;
 
        tag = dwarf_tag(die_mem);
        if ((tag == DW_TAG_formal_parameter ||
             tag == DW_TAG_variable) &&
-           die_compare_name(die_mem, name))
+           die_compare_name(die_mem, fvp->name))
                return DIE_FIND_CB_FOUND;
 
-       return DIE_FIND_CB_CONTINUE;
+       if (dwarf_haspc(die_mem, fvp->addr))
+               return DIE_FIND_CB_CONTINUE;
+       else
+               return DIE_FIND_CB_SIBLING;
 }
 
-/* Find a variable called 'name' */
-static Dwarf_Die *die_find_variable(Dwarf_Die *sp_die, const char *name,
-                                   Dwarf_Die *die_mem)
+/* Find a variable called 'name' at given address */
+static Dwarf_Die *die_find_variable_at(Dwarf_Die *sp_die, const char *name,
+                                      Dwarf_Addr addr, Dwarf_Die *die_mem)
 {
-       return die_find_child(sp_die, __die_find_variable_cb, (void *)name,
+       struct __find_variable_param fvp = { .name = name, .addr = addr};
+
+       return die_find_child(sp_die, __die_find_variable_cb, (void *)&fvp,
                              die_mem);
 }
 
@@ -361,6 +484,60 @@ static Dwarf_Die *die_find_member(Dwarf_Die *st_die, const char *name,
                              die_mem);
 }
 
+/* Get the type name of given variable DIE */
+static int die_get_typename(Dwarf_Die *vr_die, char *buf, int len)
+{
+       Dwarf_Die type;
+       int tag, ret, ret2;
+       const char *tmp = "";
+
+       if (__die_get_real_type(vr_die, &type) == NULL)
+               return -ENOENT;
+
+       tag = dwarf_tag(&type);
+       if (tag == DW_TAG_array_type || tag == DW_TAG_pointer_type)
+               tmp = "*";
+       else if (tag == DW_TAG_subroutine_type) {
+               /* Function pointer */
+               ret = snprintf(buf, len, "(function_type)");
+               return (ret >= len) ? -E2BIG : ret;
+       } else {
+               if (!dwarf_diename(&type))
+                       return -ENOENT;
+               if (tag == DW_TAG_union_type)
+                       tmp = "union ";
+               else if (tag == DW_TAG_structure_type)
+                       tmp = "struct ";
+               /* Write a base name */
+               ret = snprintf(buf, len, "%s%s", tmp, dwarf_diename(&type));
+               return (ret >= len) ? -E2BIG : ret;
+       }
+       ret = die_get_typename(&type, buf, len);
+       if (ret > 0) {
+               ret2 = snprintf(buf + ret, len - ret, "%s", tmp);
+               ret = (ret2 >= len - ret) ? -E2BIG : ret2 + ret;
+       }
+       return ret;
+}
+
+/* Get the name and type of given variable DIE, stored as "type\tname" */
+static int die_get_varname(Dwarf_Die *vr_die, char *buf, int len)
+{
+       int ret, ret2;
+
+       ret = die_get_typename(vr_die, buf, len);
+       if (ret < 0) {
+               pr_debug("Failed to get type, make it unknown.\n");
+               ret = snprintf(buf, len, "(unknown_type)");
+       }
+       if (ret > 0) {
+               ret2 = snprintf(buf + ret, len - ret, "\t%s",
+                               dwarf_diename(vr_die));
+               ret = (ret2 >= len - ret) ? -E2BIG : ret2 + ret;
+       }
+       return ret;
+}
+
 /*
  * Probe finder related functions
  */
@@ -374,8 +551,13 @@ static struct probe_trace_arg_ref *alloc_trace_arg_ref(long offs)
        return ref;
 }
 
-/* Show a location */
-static int convert_variable_location(Dwarf_Die *vr_die, struct probe_finder *pf)
+/*
+ * Convert a location into trace_arg.
+ * If tvar == NULL, this just checks whether the variable can be converted.
+ */
+static int convert_variable_location(Dwarf_Die *vr_die, Dwarf_Addr addr,
+                                    Dwarf_Op *fb_ops,
+                                    struct probe_trace_arg *tvar)
 {
        Dwarf_Attribute attr;
        Dwarf_Op *op;
@@ -384,20 +566,23 @@ static int convert_variable_location(Dwarf_Die *vr_die, struct probe_finder *pf)
        Dwarf_Word offs = 0;
        bool ref = false;
        const char *regs;
-       struct probe_trace_arg *tvar = pf->tvar;
        int ret;
 
+       if (dwarf_attr(vr_die, DW_AT_external, &attr) != NULL)
+               goto static_var;
+
        /* TODO: handle more than 1 exprs */
        if (dwarf_attr(vr_die, DW_AT_location, &attr) == NULL ||
-           dwarf_getlocation_addr(&attr, pf->addr, &op, &nops, 1) <= 0 ||
+           dwarf_getlocation_addr(&attr, addr, &op, &nops, 1) <= 0 ||
            nops == 0) {
                /* TODO: Support const_value */
-               pr_err("Failed to find the location of %s at this address.\n"
-                      " Perhaps, it has been optimized out.\n", pf->pvar->var);
                return -ENOENT;
        }
 
        if (op->atom == DW_OP_addr) {
+static_var:
+               if (!tvar)
+                       return 0;
                /* Static variables on memory (not stack), make @varname */
                ret = strlen(dwarf_diename(vr_die));
                tvar->value = zalloc(ret + 2);
@@ -412,14 +597,11 @@ static int convert_variable_location(Dwarf_Die *vr_die, struct probe_finder *pf)
 
        /* If this is based on frame buffer, set the offset */
        if (op->atom == DW_OP_fbreg) {
-               if (pf->fb_ops == NULL) {
-                       pr_warning("The attribute of frame base is not "
-                                  "supported.\n");
+               if (fb_ops == NULL)
                        return -ENOTSUP;
-               }
                ref = true;
                offs = op->number;
-               op = &pf->fb_ops[0];
+               op = &fb_ops[0];
        }
 
        if (op->atom >= DW_OP_breg0 && op->atom <= DW_OP_breg31) {
@@ -435,13 +617,18 @@ static int convert_variable_location(Dwarf_Die *vr_die, struct probe_finder *pf)
        } else if (op->atom == DW_OP_regx) {
                regn = op->number;
        } else {
-               pr_warning("DW_OP %x is not supported.\n", op->atom);
+               pr_debug("DW_OP %x is not supported.\n", op->atom);
                return -ENOTSUP;
        }
 
+       if (!tvar)
+               return 0;
+
        regs = get_arch_regstr(regn);
        if (!regs) {
-               pr_warning("Mapping for DWARF register number %u missing on this architecture.", regn);
+               /* This should be a bug in DWARF or this tool */
+               pr_warning("Mapping for DWARF register number %u "
+                          "missing on this architecture.", regn);
                return -ERANGE;
        }
 
@@ -666,8 +853,14 @@ static int convert_variable(Dwarf_Die *vr_die, struct probe_finder *pf)
        pr_debug("Converting variable %s into trace event.\n",
                 dwarf_diename(vr_die));
 
-       ret = convert_variable_location(vr_die, pf);
-       if (ret == 0 && pf->pvar->field) {
+       ret = convert_variable_location(vr_die, pf->addr, pf->fb_ops,
+                                       pf->tvar);
+       if (ret == -ENOENT)
+               pr_err("Failed to find the location of %s at this address.\n"
+                      " Perhaps, it has been optimized out.\n", pf->pvar->var);
+       else if (ret == -ENOTSUP)
+               pr_err("Sorry, we don't support this variable location yet.\n");
+       else if (pf->pvar->field) {
                ret = convert_variable_fields(vr_die, pf->pvar->var,
                                              pf->pvar->field, &pf->tvar->ref,
                                              &die_mem);
@@ -722,56 +915,39 @@ static int find_variable(Dwarf_Die *sp_die, struct probe_finder *pf)
        pr_debug("Searching '%s' variable in context.\n",
                 pf->pvar->var);
        /* Search child die for local variables and parameters. */
-       if (die_find_variable(sp_die, pf->pvar->var, &vr_die))
+       if (die_find_variable_at(sp_die, pf->pvar->var, pf->addr, &vr_die))
                ret = convert_variable(&vr_die, pf);
        else {
                /* Search upper class */
                nscopes = dwarf_getscopes_die(sp_die, &scopes);
-               if (nscopes > 0) {
-                       ret = dwarf_getscopevar(scopes, nscopes, pf->pvar->var,
-                                               0, NULL, 0, 0, &vr_die);
-                       if (ret >= 0)
+               while (nscopes-- > 1) {
+                       pr_debug("Searching variables in %s\n",
+                                dwarf_diename(&scopes[nscopes]));
+                       /* We should check this scope, so give dummy address */
+                       if (die_find_variable_at(&scopes[nscopes],
+                                                pf->pvar->var, 0,
+                                                &vr_die)) {
                                ret = convert_variable(&vr_die, pf);
-                       else
-                               ret = -ENOENT;
+                               goto found;
+                       }
+               }
+               if (scopes)
                        free(scopes);
-               } else
-                       ret = -ENOENT;
+               ret = -ENOENT;
        }
+found:
        if (ret < 0)
                pr_warning("Failed to find '%s' in this function.\n",
                           pf->pvar->var);
        return ret;
 }
 
-/* Show a probe point to output buffer */
-static int convert_probe_point(Dwarf_Die *sp_die, struct probe_finder *pf)
+/* Convert subprogram DIE to trace point */
+static int convert_to_trace_point(Dwarf_Die *sp_die, Dwarf_Addr paddr,
+                                 bool retprobe, struct probe_trace_point *tp)
 {
-       struct probe_trace_event *tev;
        Dwarf_Addr eaddr;
-       Dwarf_Die die_mem;
        const char *name;
-       int ret, i;
-       Dwarf_Attribute fb_attr;
-       size_t nops;
-
-       if (pf->ntevs == pf->max_tevs) {
-               pr_warning("Too many( > %d) probe point found.\n",
-                          pf->max_tevs);
-               return -ERANGE;
-       }
-       tev = &pf->tevs[pf->ntevs++];
-
-       /* If no real subprogram, find a real one */
-       if (!sp_die || dwarf_tag(sp_die) != DW_TAG_subprogram) {
-               sp_die = die_find_real_subprogram(&pf->cu_die,
-                                                pf->addr, &die_mem);
-               if (!sp_die) {
-                       pr_warning("Failed to find probe point in any "
-                                  "functions.\n");
-                       return -ENOENT;
-               }
-       }
 
        /* Copy the name of probe point */
        name = dwarf_diename(sp_die);
@@ -781,26 +957,45 @@ static int convert_probe_point(Dwarf_Die *sp_die, struct probe_finder *pf)
                                   dwarf_diename(sp_die));
                        return -ENOENT;
                }
-               tev->point.symbol = strdup(name);
-               if (tev->point.symbol == NULL)
+               tp->symbol = strdup(name);
+               if (tp->symbol == NULL)
                        return -ENOMEM;
-               tev->point.offset = (unsigned long)(pf->addr - eaddr);
+               tp->offset = (unsigned long)(paddr - eaddr);
        } else
                /* This function has no name. */
-               tev->point.offset = (unsigned long)pf->addr;
+               tp->offset = (unsigned long)paddr;
 
        /* Return probe must be on the head of a subprogram */
-       if (pf->pev->point.retprobe) {
-               if (tev->point.offset != 0) {
+       if (retprobe) {
+               if (eaddr != paddr) {
                        pr_warning("Return probe must be on the head of"
                                   " a real function\n");
                        return -EINVAL;
                }
-               tev->point.retprobe = true;
+               tp->retprobe = true;
        }
 
-       pr_debug("Probe point found: %s+%lu\n", tev->point.symbol,
-                tev->point.offset);
+       return 0;
+}
+
+/* Call probe_finder callback with real subprogram DIE */
+static int call_probe_finder(Dwarf_Die *sp_die, struct probe_finder *pf)
+{
+       Dwarf_Die die_mem;
+       Dwarf_Attribute fb_attr;
+       size_t nops;
+       int ret;
+
+       /* If no real subprogram, find a real one */
+       if (!sp_die || dwarf_tag(sp_die) != DW_TAG_subprogram) {
+               sp_die = die_find_real_subprogram(&pf->cu_die,
+                                                 pf->addr, &die_mem);
+               if (!sp_die) {
+                       pr_warning("Failed to find probe point in any "
+                                  "function.\n");
+                       return -ENOENT;
+               }
+       }
 
        /* Get the frame base attribute/ops */
        dwarf_attr(sp_die, DW_AT_frame_base, &fb_attr);
@@ -820,22 +1015,13 @@ static int convert_probe_point(Dwarf_Die *sp_die, struct probe_finder *pf)
 #endif
        }
 
-       /* Find each argument */
-       tev->nargs = pf->pev->nargs;
-       tev->args = zalloc(sizeof(struct probe_trace_arg) * tev->nargs);
-       if (tev->args == NULL)
-               return -ENOMEM;
-       for (i = 0; i < pf->pev->nargs; i++) {
-               pf->pvar = &pf->pev->args[i];
-               pf->tvar = &tev->args[i];
-               ret = find_variable(sp_die, pf);
-               if (ret != 0)
-                       return ret;
-       }
+       /* Call finder's callback handler */
+       ret = pf->callback(sp_die, pf);
 
        /* *pf->fb_ops will be cached in libdw. Don't free it. */
        pf->fb_ops = NULL;
-       return 0;
+
+       return ret;
 }
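
call_probe_finder() now stops at resolving the real subprogram DIE and its frame base, then defers to pf->callback, so different finders can reuse the same search path. A hedged sketch of how an additional finder could hook in, mirroring add_probe_trace_event() further below; counting_finder and count_probe_points_cb are illustrative names only, not part of the patch:

/* Sketch: a finder that merely counts resolved probe points */
struct counting_finder {
        struct probe_finder     pf;     /* embedded so container_of() works */
        int                     count;
};

static int count_probe_points_cb(Dwarf_Die *sp_die, struct probe_finder *pf)
{
        struct counting_finder *cf =
                        container_of(pf, struct counting_finder, pf);
        const char *name = dwarf_diename(sp_die);

        pr_debug("Probe point in %s at 0x%jx\n",
                 name ? name : "(unknown)", (uintmax_t)pf->addr);
        cf->count++;
        return 0;
}

It would be wired up just like the real finders below: initialize the embedded pf with .pev and .callback = count_probe_points_cb, then pass &cf.pf to find_probes().
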
 
 /* Find probe point from its line number */
@@ -871,7 +1057,7 @@ static int find_probe_point_by_line(struct probe_finder *pf)
                         (int)i, lineno, (uintmax_t)addr);
                pf->addr = addr;
 
-               ret = convert_probe_point(NULL, pf);
+               ret = call_probe_finder(NULL, pf);
                /* Continuing, because target line might be inlined. */
        }
        return ret;
@@ -984,7 +1170,7 @@ static int find_probe_point_lazy(Dwarf_Die *sp_die, struct probe_finder *pf)
                         (int)i, lineno, (unsigned long long)addr);
                pf->addr = addr;
 
-               ret = convert_probe_point(sp_die, pf);
+               ret = call_probe_finder(sp_die, pf);
                /* Continuing, because target line might be inlined. */
        }
        /* TODO: deallocate lines, but how? */
@@ -1019,7 +1205,7 @@ static int probe_point_inline_cb(Dwarf_Die *in_die, void *data)
                pr_debug("found inline addr: 0x%jx\n",
                         (uintmax_t)pf->addr);
 
-               param->retval = convert_probe_point(in_die, pf);
+               param->retval = call_probe_finder(in_die, pf);
                if (param->retval < 0)
                        return DWARF_CB_ABORT;
        }
@@ -1057,7 +1243,7 @@ static int probe_point_search_cb(Dwarf_Die *sp_die, void *data)
                        }
                        pf->addr += pp->offset;
                        /* TODO: Check the address in this function */
-                       param->retval = convert_probe_point(sp_die, pf);
+                       param->retval = call_probe_finder(sp_die, pf);
                }
        } else {
                struct dwarf_callback_param _param = {.data = (void *)pf,
@@ -1079,90 +1265,276 @@ static int find_probe_point_by_func(struct probe_finder *pf)
        return _param.retval;
 }
 
-/* Find probe_trace_events specified by perf_probe_event from debuginfo */
-int find_probe_trace_events(int fd, struct perf_probe_event *pev,
-                            struct probe_trace_event **tevs, int max_tevs)
+/* Find probe points from debuginfo */
+static int find_probes(int fd, struct probe_finder *pf)
 {
-       struct probe_finder pf = {.pev = pev, .max_tevs = max_tevs};
-       struct perf_probe_point *pp = &pev->point;
+       struct perf_probe_point *pp = &pf->pev->point;
        Dwarf_Off off, noff;
        size_t cuhl;
        Dwarf_Die *diep;
-       Dwarf *dbg;
+       Dwarf *dbg = NULL;
+       Dwfl *dwfl;
+       Dwarf_Addr bias;        /* Currently ignored */
        int ret = 0;
 
-       pf.tevs = zalloc(sizeof(struct probe_trace_event) * max_tevs);
-       if (pf.tevs == NULL)
-               return -ENOMEM;
-       *tevs = pf.tevs;
-       pf.ntevs = 0;
-
-       dbg = dwarf_begin(fd, DWARF_C_READ);
+       dbg = dwfl_init_offline_dwarf(fd, &dwfl, &bias);
        if (!dbg) {
                pr_warning("No dwarf info found in the vmlinux - "
                        "please rebuild with CONFIG_DEBUG_INFO=y.\n");
-               free(pf.tevs);
-               *tevs = NULL;
                return -EBADF;
        }
 
 #if _ELFUTILS_PREREQ(0, 142)
        /* Get the call frame information from this dwarf */
-       pf.cfi = dwarf_getcfi(dbg);
+       pf->cfi = dwarf_getcfi(dbg);
 #endif
 
        off = 0;
-       line_list__init(&pf.lcache);
+       line_list__init(&pf->lcache);
        /* Loop on CUs (Compilation Unit) */
        while (!dwarf_nextcu(dbg, off, &noff, &cuhl, NULL, NULL, NULL) &&
               ret >= 0) {
                /* Get the DIE(Debugging Information Entry) of this CU */
-               diep = dwarf_offdie(dbg, off + cuhl, &pf.cu_die);
+               diep = dwarf_offdie(dbg, off + cuhl, &pf->cu_die);
                if (!diep)
                        continue;
 
                /* Check if target file is included. */
                if (pp->file)
-                       pf.fname = cu_find_realpath(&pf.cu_die, pp->file);
+                       pf->fname = cu_find_realpath(&pf->cu_die, pp->file);
                else
-                       pf.fname = NULL;
+                       pf->fname = NULL;
 
-               if (!pp->file || pf.fname) {
+               if (!pp->file || pf->fname) {
                        if (pp->function)
-                               ret = find_probe_point_by_func(&pf);
+                               ret = find_probe_point_by_func(pf);
                        else if (pp->lazy_line)
-                               ret = find_probe_point_lazy(NULL, &pf);
+                               ret = find_probe_point_lazy(NULL, pf);
                        else {
-                               pf.lno = pp->line;
-                               ret = find_probe_point_by_line(&pf);
+                               pf->lno = pp->line;
+                               ret = find_probe_point_by_line(pf);
                        }
                }
                off = noff;
        }
-       line_list__free(&pf.lcache);
-       dwarf_end(dbg);
+       line_list__free(&pf->lcache);
+       if (dwfl)
+               dwfl_end(dwfl);
 
-       return (ret < 0) ? ret : pf.ntevs;
+       return ret;
+}
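
find_probes() now obtains its Dwarf handle from dwfl_init_offline_dwarf(), whose definition is not part of these hunks; only its use (fd in, Dwfl handle and load bias out) is visible here. The sketch below is merely an approximation of such a helper built on the standard libdwfl calls, not the code this series actually adds:

/* Rough sketch only -- the helper added by this series may differ.
 * It reports the given debuginfo fd to libdwfl and pulls the Dwarf
 * handle (plus load bias) back out of the reported module. */
static Dwarf *sketch_init_offline_dwarf(int fd, Dwfl **dwflp, Dwarf_Addr *bias)
{
        static const Dwfl_Callbacks offline_callbacks = {
                .find_debuginfo  = dwfl_standard_find_debuginfo,
                .section_address = dwfl_offline_section_address,
        };
        Dwfl_Module *mod;
        Dwarf *dbg = NULL;

        *dwflp = dwfl_begin(&offline_callbacks);
        if (!*dwflp)
                return NULL;

        mod = dwfl_report_offline(*dwflp, "", "", fd);
        dwfl_report_end(*dwflp, NULL, NULL);
        if (mod)
                dbg = dwfl_module_getdwarf(mod, bias);

        if (!dbg) {
                dwfl_end(*dwflp);
                *dwflp = NULL;
        }
        return dbg;
}
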
+
+/* Add a found probe point into trace event list */
+static int add_probe_trace_event(Dwarf_Die *sp_die, struct probe_finder *pf)
+{
+       struct trace_event_finder *tf =
+                       container_of(pf, struct trace_event_finder, pf);
+       struct probe_trace_event *tev;
+       int ret, i;
+
+       /* Check number of tevs */
+       if (tf->ntevs == tf->max_tevs) {
+               pr_warning("Too many (> %d) probe points found.\n",
+                          tf->max_tevs);
+               return -ERANGE;
+       }
+       tev = &tf->tevs[tf->ntevs++];
+
+       ret = convert_to_trace_point(sp_die, pf->addr, pf->pev->point.retprobe,
+                                    &tev->point);
+       if (ret < 0)
+               return ret;
+
+       pr_debug("Probe point found: %s+%lu\n", tev->point.symbol,
+                tev->point.offset);
+
+       /* Find each argument */
+       tev->nargs = pf->pev->nargs;
+       tev->args = zalloc(sizeof(struct probe_trace_arg) * tev->nargs);
+       if (tev->args == NULL)
+               return -ENOMEM;
+       for (i = 0; i < pf->pev->nargs; i++) {
+               pf->pvar = &pf->pev->args[i];
+               pf->tvar = &tev->args[i];
+               ret = find_variable(sp_die, pf);
+               if (ret != 0)
+                       return ret;
+       }
+
+       return 0;
+}
+
+/* Find probe_trace_events specified by perf_probe_event from debuginfo */
+int find_probe_trace_events(int fd, struct perf_probe_event *pev,
+                           struct probe_trace_event **tevs, int max_tevs)
+{
+       struct trace_event_finder tf = {
+                       .pf = {.pev = pev, .callback = add_probe_trace_event},
+                       .max_tevs = max_tevs};
+       int ret;
+
+       /* Allocate result tevs array */
+       *tevs = zalloc(sizeof(struct probe_trace_event) * max_tevs);
+       if (*tevs == NULL)
+               return -ENOMEM;
+
+       tf.tevs = *tevs;
+       tf.ntevs = 0;
+
+       ret = find_probes(fd, &tf.pf);
+       if (ret < 0) {
+               free(*tevs);
+               *tevs = NULL;
+               return ret;
+       }
+
+       return (ret < 0) ? ret : tf.ntevs;
+}
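
For reference, a hypothetical caller of the reworked find_probe_trace_events(); the 128-entry cap and the report_trace_events() wrapper are illustrative, and on failure the function now frees and clears *tevs itself:

/* Hypothetical caller, for illustration only */
static int report_trace_events(int fd, struct perf_probe_event *pev)
{
        struct probe_trace_event *tevs = NULL;
        int i, ntevs;

        ntevs = find_probe_trace_events(fd, pev, &tevs, 128);
        if (ntevs < 0)
                return ntevs;   /* tevs was freed and cleared on error */

        for (i = 0; i < ntevs; i++)
                pr_debug("Found: %s+%lu (%d args)\n", tevs[i].point.symbol,
                         tevs[i].point.offset, tevs[i].nargs);

        free(tevs);     /* note: per-event members are not released here */
        return ntevs;
}
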
+
+#define MAX_VAR_LEN 64
+
+/* Collect available variables in this scope */
+static int collect_variables_cb(Dwarf_Die *die_mem, void *data)
+{
+       struct available_var_finder *af = data;
+       struct variable_list *vl;
+       char buf[MAX_VAR_LEN];
+       int tag, ret;
+
+       vl = &af->vls[af->nvls - 1];
+
+       tag = dwarf_tag(die_mem);
+       if (tag == DW_TAG_formal_parameter ||
+           tag == DW_TAG_variable) {
+               ret = convert_variable_location(die_mem, af->pf.addr,
+                                               af->pf.fb_ops, NULL);
+               if (ret == 0) {
+                       ret = die_get_varname(die_mem, buf, MAX_VAR_LEN);
+                       pr_debug2("Add new var: %s\n", buf);
+                       if (ret > 0)
+                               strlist__add(vl->vars, buf);
+               }
+       }
+
+       if (af->child && dwarf_haspc(die_mem, af->pf.addr))
+               return DIE_FIND_CB_CONTINUE;
+       else
+               return DIE_FIND_CB_SIBLING;
+}
+
+/* Add found variables into the available variables list */
+static int add_available_vars(Dwarf_Die *sp_die, struct probe_finder *pf)
+{
+       struct available_var_finder *af =
+                       container_of(pf, struct available_var_finder, pf);
+       struct variable_list *vl;
+       Dwarf_Die die_mem, *scopes = NULL;
+       int ret, nscopes;
+
+       /* Check number of variable lists */
+       if (af->nvls == af->max_vls) {
+               pr_warning("Too many (> %d) probe points found.\n", af->max_vls);
+               return -ERANGE;
+       }
+       vl = &af->vls[af->nvls++];
+
+       ret = convert_to_trace_point(sp_die, pf->addr, pf->pev->point.retprobe,
+                                    &vl->point);
+       if (ret < 0)
+               return ret;
+
+       pr_debug("Probe point found: %s+%lu\n", vl->point.symbol,
+                vl->point.offset);
+
+       /* Find local variables */
+       vl->vars = strlist__new(true, NULL);
+       if (vl->vars == NULL)
+               return -ENOMEM;
+       af->child = true;
+       die_find_child(sp_die, collect_variables_cb, (void *)af, &die_mem);
+
+       /* Find external variables */
+       if (!af->externs)
+               goto out;
+       /* Don't need to search child DIE for externs. */
+       af->child = false;
+       nscopes = dwarf_getscopes_die(sp_die, &scopes);
+       while (nscopes-- > 1)
+               die_find_child(&scopes[nscopes], collect_variables_cb,
+                              (void *)af, &die_mem);
+       if (scopes)
+               free(scopes);
+
+out:
+       if (strlist__empty(vl->vars)) {
+               strlist__delete(vl->vars);
+               vl->vars = NULL;
+       }
+
+       return ret;
+}
+
+/* Find available variables at given probe point */
+int find_available_vars_at(int fd, struct perf_probe_event *pev,
+                          struct variable_list **vls, int max_vls,
+                          bool externs)
+{
+       struct available_var_finder af = {
+                       .pf = {.pev = pev, .callback = add_available_vars},
+                       .max_vls = max_vls, .externs = externs};
+       int ret;
+
+       /* Allocate result vls array */
+       *vls = zalloc(sizeof(struct variable_list) * max_vls);
+       if (*vls == NULL)
+               return -ENOMEM;
+
+       af.vls = *vls;
+       af.nvls = 0;
+
+       ret = find_probes(fd, &af.pf);
+       if (ret < 0) {
+               /* Free vlist for error */
+               while (af.nvls--) {
+                       if (af.vls[af.nvls].point.symbol)
+                               free(af.vls[af.nvls].point.symbol);
+                       if (af.vls[af.nvls].vars)
+                               strlist__delete(af.vls[af.nvls].vars);
+               }
+               free(af.vls);
+               *vls = NULL;
+               return ret;
+       }
+
+       return (ret < 0) ? ret : af.nvls;
 }
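
Likewise, a hypothetical caller of find_available_vars_at(); the 32-entry cap and the consume_available_vars() wrapper are illustrative, and the cleanup simply mirrors the error path above:

/* Hypothetical caller, for illustration only: fetch the variable lists
 * and release them the same way the error path above does. */
static int consume_available_vars(int fd, struct perf_probe_event *pev)
{
        struct variable_list *vls = NULL;
        int i, nvls;

        nvls = find_available_vars_at(fd, pev, &vls, 32, false);
        if (nvls < 0)
                return nvls;    /* vls was freed and cleared on error */

        for (i = 0; i < nvls; i++) {
                /* vls[i].vars may be NULL if nothing was usable here */
                if (vls[i].vars)
                        strlist__delete(vls[i].vars);
                if (vls[i].point.symbol)
                        free(vls[i].point.symbol);
        }
        free(vls);
        return nvls;
}
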
 
 /* Reverse search */
-int find_perf_probe_point(int fd, unsigned long addr,
-                         struct perf_probe_point *ppt)
+int find_perf_probe_point(unsigned long addr, struct perf_probe_point *ppt)
 {
        Dwarf_Die cudie, spdie, indie;
-       Dwarf *dbg;
+       Dwarf *dbg = NULL;
+       Dwfl *dwfl = NULL;
        Dwarf_Line *line;
-       Dwarf_Addr laddr, eaddr;
+       Dwarf_Addr laddr, eaddr, bias = 0;
        const char *tmp;
        int lineno, ret = 0;
        bool found = false;
 
-       dbg = dwarf_begin(fd, DWARF_C_READ);
-       if (!dbg)
-               return -EBADF;
+       /* Open the live linux kernel */
+       dbg = dwfl_init_live_kernel_dwarf(addr, &dwfl, &bias);
+       if (!dbg) {
+               pr_warning("No dwarf info found in the vmlinux - "
+                       "please rebuild with CONFIG_DEBUG_INFO=y.\n");
+               ret = -EINVAL;
+               goto end;
+       }
 
+       /* Adjust address with bias */
+       addr += bias;
        /* Find cu die */
-       if (!dwarf_addrdie(dbg, (Dwarf_Addr)addr, &cudie)) {
+       if (!dwarf_addrdie(dbg, (Dwarf_Addr)addr - bias, &cudie)) {
+               pr_warning("No CU DIE is found at %lx\n", addr);
                ret = -EINVAL;
                goto end;
        }
@@ -1225,7 +1597,8 @@ found:
        }
 
 end:
-       dwarf_end(dbg);
+       if (dwfl)
+               dwfl_end(dwfl);
        if (ret >= 0)
                ret = found ? 1 : 0;
        return ret;
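
Since find_perf_probe_point() now opens the live kernel's debuginfo itself, callers only pass the address. A hedged usage sketch; report_probe_point() is an illustrative wrapper and assumes the usual perf util includes:

/* Hypothetical caller, for illustration only */
static void report_probe_point(unsigned long addr)
{
        struct perf_probe_point ppt;
        int ret;

        memset(&ppt, 0, sizeof(ppt));
        ret = find_perf_probe_point(addr, &ppt);
        if (ret <= 0) {
                pr_debug("No probe point info at 0x%lx (%d)\n", addr, ret);
                return;
        }
        pr_debug("0x%lx is %s:%d\n", addr,
                 ppt.function ? ppt.function : "(unknown)", ppt.line);
}
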
@@ -1358,6 +1731,9 @@ static int line_range_search_cb(Dwarf_Die *sp_die, void *data)
        struct line_finder *lf = param->data;
        struct line_range *lr = lf->lr;
 
+       pr_debug("find (%llx) %s\n",
+                (unsigned long long)dwarf_dieoffset(sp_die),
+                dwarf_diename(sp_die));
        if (dwarf_tag(sp_die) == DW_TAG_subprogram &&
            die_compare_name(sp_die, lr->function)) {
                lf->fname = dwarf_decl_file(sp_die);
@@ -1401,10 +1777,12 @@ int find_line_range(int fd, struct line_range *lr)
        Dwarf_Off off = 0, noff;
        size_t cuhl;
        Dwarf_Die *diep;
-       Dwarf *dbg;
+       Dwarf *dbg = NULL;
+       Dwfl *dwfl;
+       Dwarf_Addr bias;        /* Currently ignored */
        const char *comp_dir;
 
-       dbg = dwarf_begin(fd, DWARF_C_READ);
+       dbg = dwfl_init_offline_dwarf(fd, &dwfl, &bias);
        if (!dbg) {
                pr_warning("No dwarf info found in the vmlinux - "
                        "please rebuild with CONFIG_DEBUG_INFO=y.\n");
@@ -1450,8 +1828,7 @@ int find_line_range(int fd, struct line_range *lr)
        }
 
        pr_debug("path: %s\n", lr->path);
-       dwarf_end(dbg);
-
+       dwfl_end(dwfl);
        return (ret < 0) ? ret : lf.found;
 }
 
index 4507d51..bba69d4 100644 (file)
@@ -22,20 +22,27 @@ extern int find_probe_trace_events(int fd, struct perf_probe_event *pev,
                                    int max_tevs);
 
 /* Find a perf_probe_point from debuginfo */
-extern int find_perf_probe_point(int fd, unsigned long addr,
+extern int find_perf_probe_point(unsigned long addr,
                                 struct perf_probe_point *ppt);
 
+/* Find a line range */
 extern int find_line_range(int fd, struct line_range *lr);
 
+/* Find available variables */
+extern int find_available_vars_at(int fd, struct perf_probe_event *pev,
+                                 struct variable_list **vls, int max_points,
+                                 bool externs);
+
 #include <dwarf.h>
 #include <libdw.h>
+#include <libdwfl.h>
 #include <version.h>
 
 struct probe_finder {
        struct perf_probe_event *pev;           /* Target probe event */
-       struct probe_trace_event *tevs;         /* Result trace events */
-       int                     ntevs;          /* Number of trace events */
-       int                     max_tevs;       /* Max number of trace events */
+
+       /* Callback when a probe point is found */
+       int (*callback)(Dwarf_Die *sp_die, struct probe_finder *pf);
 
        /* For function searching */
        int                     lno;            /* Line number */
@@ -53,6 +60,22 @@ struct probe_finder {
        struct probe_trace_arg  *tvar;          /* Current result variable */
 };
 
+struct trace_event_finder {
+       struct probe_finder     pf;
+       struct probe_trace_event *tevs;         /* Found trace events */
+       int                     ntevs;          /* Number of trace events */
+       int                     max_tevs;       /* Max number of trace events */
+};
+
+struct available_var_finder {
+       struct probe_finder     pf;
+       struct variable_list    *vls;           /* Found variable lists */
+       int                     nvls;           /* Number of variable lists */
+       int                     max_vls;        /* Max no. of variable lists */
+       bool                    externs;        /* Find external vars too */
+       bool                    child;          /* Search child scopes */
+};
+
 struct line_finder {
        struct line_range       *lr;            /* Target line range */
 
index 6d0df80..8bc010e 100644 (file)
@@ -1,4 +1,3 @@
-#include <slang.h>
 #include "libslang.h"
 #include <linux/compiler.h>
 #include <linux/list.h>